1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_crtc_helper.h> 41 #include <drm/drm_plane_helper.h> 42 #include <drm/drm_rect.h> 43 44 /* Primary plane formats supported by all gen */ 45 #define COMMON_PRIMARY_FORMATS \ 46 DRM_FORMAT_C8, \ 47 DRM_FORMAT_RGB565, \ 48 DRM_FORMAT_XRGB8888, \ 49 DRM_FORMAT_ARGB8888 50 51 /* Primary plane formats for gen <= 3 */ 52 static const uint32_t intel_primary_formats_gen2[] = { 53 COMMON_PRIMARY_FORMATS, 54 DRM_FORMAT_XRGB1555, 55 DRM_FORMAT_ARGB1555, 56 }; 57 58 /* Primary plane formats for gen >= 4 */ 59 static const uint32_t intel_primary_formats_gen4[] = { 60 COMMON_PRIMARY_FORMATS, \ 61 DRM_FORMAT_XBGR8888, 62 DRM_FORMAT_ABGR8888, 63 DRM_FORMAT_XRGB2101010, 64 DRM_FORMAT_ARGB2101010, 65 DRM_FORMAT_XBGR2101010, 66 DRM_FORMAT_ABGR2101010, 67 }; 68 69 /* Cursor formats */ 70 static const uint32_t intel_cursor_formats[] = { 71 DRM_FORMAT_ARGB8888, 72 }; 73 74 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 75 76 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 77 struct intel_crtc_state *pipe_config); 78 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 79 struct intel_crtc_state *pipe_config); 80 81 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 82 int x, int y, struct drm_framebuffer *old_fb); 83 static int intel_framebuffer_init(struct drm_device *dev, 84 struct intel_framebuffer *ifb, 85 struct drm_mode_fb_cmd2 *mode_cmd, 86 struct drm_i915_gem_object *obj); 87 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 88 static void 
intel_set_pipe_timings(struct intel_crtc *intel_crtc); 89 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 90 struct intel_link_m_n *m_n, 91 struct intel_link_m_n *m2_n2); 92 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 93 static void haswell_set_pipeconf(struct drm_crtc *crtc); 94 static void intel_set_pipe_csc(struct drm_crtc *crtc); 95 static void vlv_prepare_pll(struct intel_crtc *crtc, 96 const struct intel_crtc_state *pipe_config); 97 static void chv_prepare_pll(struct intel_crtc *crtc, 98 const struct intel_crtc_state *pipe_config); 99 static void intel_begin_crtc_commit(struct drm_crtc *crtc); 100 static void intel_finish_crtc_commit(struct drm_crtc *crtc); 101 102 static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) 103 { 104 if (!connector->mst_port) 105 return connector->encoder; 106 else 107 return &connector->mst_port->mst_encoders[pipe]->base; 108 } 109 110 typedef struct { 111 int min, max; 112 } intel_range_t; 113 114 typedef struct { 115 int dot_limit; 116 int p2_slow, p2_fast; 117 } intel_p2_t; 118 119 typedef struct intel_limit intel_limit_t; 120 struct intel_limit { 121 intel_range_t dot, vco, n, m, m1, m2, p, p1; 122 intel_p2_t p2; 123 }; 124 125 int 126 intel_pch_rawclk(struct drm_device *dev) 127 { 128 struct drm_i915_private *dev_priv = dev->dev_private; 129 130 WARN_ON(!HAS_PCH_SPLIT(dev)); 131 132 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 133 } 134 135 static inline u32 /* units of 100MHz */ 136 intel_fdi_link_freq(struct drm_device *dev) 137 { 138 if (IS_GEN5(dev)) { 139 struct drm_i915_private *dev_priv = dev->dev_private; 140 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 141 } else 142 return 27; 143 } 144 145 static const intel_limit_t intel_limits_i8xx_dac = { 146 .dot = { .min = 25000, .max = 350000 }, 147 .vco = { .min = 908000, .max = 1512000 }, 148 .n = { .min = 2, .max = 16 }, 149 .m = { .min = 96, .max = 140 }, 150 .m1 = { .min = 
18, .max = 26 }, 151 .m2 = { .min = 6, .max = 16 }, 152 .p = { .min = 4, .max = 128 }, 153 .p1 = { .min = 2, .max = 33 }, 154 .p2 = { .dot_limit = 165000, 155 .p2_slow = 4, .p2_fast = 2 }, 156 }; 157 158 static const intel_limit_t intel_limits_i8xx_dvo = { 159 .dot = { .min = 25000, .max = 350000 }, 160 .vco = { .min = 908000, .max = 1512000 }, 161 .n = { .min = 2, .max = 16 }, 162 .m = { .min = 96, .max = 140 }, 163 .m1 = { .min = 18, .max = 26 }, 164 .m2 = { .min = 6, .max = 16 }, 165 .p = { .min = 4, .max = 128 }, 166 .p1 = { .min = 2, .max = 33 }, 167 .p2 = { .dot_limit = 165000, 168 .p2_slow = 4, .p2_fast = 4 }, 169 }; 170 171 static const intel_limit_t intel_limits_i8xx_lvds = { 172 .dot = { .min = 25000, .max = 350000 }, 173 .vco = { .min = 908000, .max = 1512000 }, 174 .n = { .min = 2, .max = 16 }, 175 .m = { .min = 96, .max = 140 }, 176 .m1 = { .min = 18, .max = 26 }, 177 .m2 = { .min = 6, .max = 16 }, 178 .p = { .min = 4, .max = 128 }, 179 .p1 = { .min = 1, .max = 6 }, 180 .p2 = { .dot_limit = 165000, 181 .p2_slow = 14, .p2_fast = 7 }, 182 }; 183 184 static const intel_limit_t intel_limits_i9xx_sdvo = { 185 .dot = { .min = 20000, .max = 400000 }, 186 .vco = { .min = 1400000, .max = 2800000 }, 187 .n = { .min = 1, .max = 6 }, 188 .m = { .min = 70, .max = 120 }, 189 .m1 = { .min = 8, .max = 18 }, 190 .m2 = { .min = 3, .max = 7 }, 191 .p = { .min = 5, .max = 80 }, 192 .p1 = { .min = 1, .max = 8 }, 193 .p2 = { .dot_limit = 200000, 194 .p2_slow = 10, .p2_fast = 5 }, 195 }; 196 197 static const intel_limit_t intel_limits_i9xx_lvds = { 198 .dot = { .min = 20000, .max = 400000 }, 199 .vco = { .min = 1400000, .max = 2800000 }, 200 .n = { .min = 1, .max = 6 }, 201 .m = { .min = 70, .max = 120 }, 202 .m1 = { .min = 8, .max = 18 }, 203 .m2 = { .min = 3, .max = 7 }, 204 .p = { .min = 7, .max = 98 }, 205 .p1 = { .min = 1, .max = 8 }, 206 .p2 = { .dot_limit = 112000, 207 .p2_slow = 14, .p2_fast = 7 }, 208 }; 209 210 211 static const intel_limit_t intel_limits_g4x_sdvo = 
{ 212 .dot = { .min = 25000, .max = 270000 }, 213 .vco = { .min = 1750000, .max = 3500000}, 214 .n = { .min = 1, .max = 4 }, 215 .m = { .min = 104, .max = 138 }, 216 .m1 = { .min = 17, .max = 23 }, 217 .m2 = { .min = 5, .max = 11 }, 218 .p = { .min = 10, .max = 30 }, 219 .p1 = { .min = 1, .max = 3}, 220 .p2 = { .dot_limit = 270000, 221 .p2_slow = 10, 222 .p2_fast = 10 223 }, 224 }; 225 226 static const intel_limit_t intel_limits_g4x_hdmi = { 227 .dot = { .min = 22000, .max = 400000 }, 228 .vco = { .min = 1750000, .max = 3500000}, 229 .n = { .min = 1, .max = 4 }, 230 .m = { .min = 104, .max = 138 }, 231 .m1 = { .min = 16, .max = 23 }, 232 .m2 = { .min = 5, .max = 11 }, 233 .p = { .min = 5, .max = 80 }, 234 .p1 = { .min = 1, .max = 8}, 235 .p2 = { .dot_limit = 165000, 236 .p2_slow = 10, .p2_fast = 5 }, 237 }; 238 239 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 240 .dot = { .min = 20000, .max = 115000 }, 241 .vco = { .min = 1750000, .max = 3500000 }, 242 .n = { .min = 1, .max = 3 }, 243 .m = { .min = 104, .max = 138 }, 244 .m1 = { .min = 17, .max = 23 }, 245 .m2 = { .min = 5, .max = 11 }, 246 .p = { .min = 28, .max = 112 }, 247 .p1 = { .min = 2, .max = 8 }, 248 .p2 = { .dot_limit = 0, 249 .p2_slow = 14, .p2_fast = 14 250 }, 251 }; 252 253 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 254 .dot = { .min = 80000, .max = 224000 }, 255 .vco = { .min = 1750000, .max = 3500000 }, 256 .n = { .min = 1, .max = 3 }, 257 .m = { .min = 104, .max = 138 }, 258 .m1 = { .min = 17, .max = 23 }, 259 .m2 = { .min = 5, .max = 11 }, 260 .p = { .min = 14, .max = 42 }, 261 .p1 = { .min = 2, .max = 6 }, 262 .p2 = { .dot_limit = 0, 263 .p2_slow = 7, .p2_fast = 7 264 }, 265 }; 266 267 static const intel_limit_t intel_limits_pineview_sdvo = { 268 .dot = { .min = 20000, .max = 400000}, 269 .vco = { .min = 1700000, .max = 3500000 }, 270 /* Pineview's Ncounter is a ring counter */ 271 .n = { .min = 3, .max = 6 }, 272 .m = { .min = 2, .max = 256 }, 273 /* 
Pineview only has one combined m divider, which we treat as m2. */ 274 .m1 = { .min = 0, .max = 0 }, 275 .m2 = { .min = 0, .max = 254 }, 276 .p = { .min = 5, .max = 80 }, 277 .p1 = { .min = 1, .max = 8 }, 278 .p2 = { .dot_limit = 200000, 279 .p2_slow = 10, .p2_fast = 5 }, 280 }; 281 282 static const intel_limit_t intel_limits_pineview_lvds = { 283 .dot = { .min = 20000, .max = 400000 }, 284 .vco = { .min = 1700000, .max = 3500000 }, 285 .n = { .min = 3, .max = 6 }, 286 .m = { .min = 2, .max = 256 }, 287 .m1 = { .min = 0, .max = 0 }, 288 .m2 = { .min = 0, .max = 254 }, 289 .p = { .min = 7, .max = 112 }, 290 .p1 = { .min = 1, .max = 8 }, 291 .p2 = { .dot_limit = 112000, 292 .p2_slow = 14, .p2_fast = 14 }, 293 }; 294 295 /* Ironlake / Sandybridge 296 * 297 * We calculate clock using (register_value + 2) for N/M1/M2, so here 298 * the range value for them is (actual_value - 2). 299 */ 300 static const intel_limit_t intel_limits_ironlake_dac = { 301 .dot = { .min = 25000, .max = 350000 }, 302 .vco = { .min = 1760000, .max = 3510000 }, 303 .n = { .min = 1, .max = 5 }, 304 .m = { .min = 79, .max = 127 }, 305 .m1 = { .min = 12, .max = 22 }, 306 .m2 = { .min = 5, .max = 9 }, 307 .p = { .min = 5, .max = 80 }, 308 .p1 = { .min = 1, .max = 8 }, 309 .p2 = { .dot_limit = 225000, 310 .p2_slow = 10, .p2_fast = 5 }, 311 }; 312 313 static const intel_limit_t intel_limits_ironlake_single_lvds = { 314 .dot = { .min = 25000, .max = 350000 }, 315 .vco = { .min = 1760000, .max = 3510000 }, 316 .n = { .min = 1, .max = 3 }, 317 .m = { .min = 79, .max = 118 }, 318 .m1 = { .min = 12, .max = 22 }, 319 .m2 = { .min = 5, .max = 9 }, 320 .p = { .min = 28, .max = 112 }, 321 .p1 = { .min = 2, .max = 8 }, 322 .p2 = { .dot_limit = 225000, 323 .p2_slow = 14, .p2_fast = 14 }, 324 }; 325 326 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 327 .dot = { .min = 25000, .max = 350000 }, 328 .vco = { .min = 1760000, .max = 3510000 }, 329 .n = { .min = 1, .max = 3 }, 330 .m = { .min = 79, .max 
= 127 }, 331 .m1 = { .min = 12, .max = 22 }, 332 .m2 = { .min = 5, .max = 9 }, 333 .p = { .min = 14, .max = 56 }, 334 .p1 = { .min = 2, .max = 8 }, 335 .p2 = { .dot_limit = 225000, 336 .p2_slow = 7, .p2_fast = 7 }, 337 }; 338 339 /* LVDS 100mhz refclk limits. */ 340 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 341 .dot = { .min = 25000, .max = 350000 }, 342 .vco = { .min = 1760000, .max = 3510000 }, 343 .n = { .min = 1, .max = 2 }, 344 .m = { .min = 79, .max = 126 }, 345 .m1 = { .min = 12, .max = 22 }, 346 .m2 = { .min = 5, .max = 9 }, 347 .p = { .min = 28, .max = 112 }, 348 .p1 = { .min = 2, .max = 8 }, 349 .p2 = { .dot_limit = 225000, 350 .p2_slow = 14, .p2_fast = 14 }, 351 }; 352 353 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 354 .dot = { .min = 25000, .max = 350000 }, 355 .vco = { .min = 1760000, .max = 3510000 }, 356 .n = { .min = 1, .max = 3 }, 357 .m = { .min = 79, .max = 126 }, 358 .m1 = { .min = 12, .max = 22 }, 359 .m2 = { .min = 5, .max = 9 }, 360 .p = { .min = 14, .max = 42 }, 361 .p1 = { .min = 2, .max = 6 }, 362 .p2 = { .dot_limit = 225000, 363 .p2_slow = 7, .p2_fast = 7 }, 364 }; 365 366 static const intel_limit_t intel_limits_vlv = { 367 /* 368 * These are the data rate limits (measured in fast clocks) 369 * since those are the strictest limits we have. The fast 370 * clock and actual rate limits are more relaxed, so checking 371 * them would make no difference. 372 */ 373 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 374 .vco = { .min = 4000000, .max = 6000000 }, 375 .n = { .min = 1, .max = 7 }, 376 .m1 = { .min = 2, .max = 3 }, 377 .m2 = { .min = 11, .max = 156 }, 378 .p1 = { .min = 2, .max = 3 }, 379 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 380 }; 381 382 static const intel_limit_t intel_limits_chv = { 383 /* 384 * These are the data rate limits (measured in fast clocks) 385 * since those are the strictest limits we have. 
The fast 386 * clock and actual rate limits are more relaxed, so checking 387 * them would make no difference. 388 */ 389 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 390 .vco = { .min = 4860000, .max = 6700000 }, 391 .n = { .min = 1, .max = 1 }, 392 .m1 = { .min = 2, .max = 2 }, 393 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 394 .p1 = { .min = 2, .max = 4 }, 395 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 396 }; 397 398 static void vlv_clock(int refclk, intel_clock_t *clock) 399 { 400 clock->m = clock->m1 * clock->m2; 401 clock->p = clock->p1 * clock->p2; 402 if (WARN_ON(clock->n == 0 || clock->p == 0)) 403 return; 404 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 405 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 406 } 407 408 /** 409 * Returns whether any output on the specified pipe is of the specified type 410 */ 411 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) 412 { 413 struct drm_device *dev = crtc->base.dev; 414 struct intel_encoder *encoder; 415 416 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 417 if (encoder->type == type) 418 return true; 419 420 return false; 421 } 422 423 /** 424 * Returns whether any output on the specified pipe will have the specified 425 * type after a staged modeset is complete, i.e., the same as 426 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 427 * encoder->crtc. 
428 */ 429 static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type) 430 { 431 struct drm_device *dev = crtc->base.dev; 432 struct intel_encoder *encoder; 433 434 for_each_intel_encoder(dev, encoder) 435 if (encoder->new_crtc == crtc && encoder->type == type) 436 return true; 437 438 return false; 439 } 440 441 static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, 442 int refclk) 443 { 444 struct drm_device *dev = crtc->base.dev; 445 const intel_limit_t *limit; 446 447 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 448 if (intel_is_dual_link_lvds(dev)) { 449 if (refclk == 100000) 450 limit = &intel_limits_ironlake_dual_lvds_100m; 451 else 452 limit = &intel_limits_ironlake_dual_lvds; 453 } else { 454 if (refclk == 100000) 455 limit = &intel_limits_ironlake_single_lvds_100m; 456 else 457 limit = &intel_limits_ironlake_single_lvds; 458 } 459 } else 460 limit = &intel_limits_ironlake_dac; 461 462 return limit; 463 } 464 465 static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) 466 { 467 struct drm_device *dev = crtc->base.dev; 468 const intel_limit_t *limit; 469 470 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 471 if (intel_is_dual_link_lvds(dev)) 472 limit = &intel_limits_g4x_dual_channel_lvds; 473 else 474 limit = &intel_limits_g4x_single_channel_lvds; 475 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) || 476 intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) { 477 limit = &intel_limits_g4x_hdmi; 478 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) { 479 limit = &intel_limits_g4x_sdvo; 480 } else /* The option is for other outputs */ 481 limit = &intel_limits_i9xx_sdvo; 482 483 return limit; 484 } 485 486 static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) 487 { 488 struct drm_device *dev = crtc->base.dev; 489 const intel_limit_t *limit; 490 491 if (HAS_PCH_SPLIT(dev)) 492 limit = intel_ironlake_limit(crtc, refclk); 493 else if 
(IS_G4X(dev)) { 494 limit = intel_g4x_limit(crtc); 495 } else if (IS_PINEVIEW(dev)) { 496 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) 497 limit = &intel_limits_pineview_lvds; 498 else 499 limit = &intel_limits_pineview_sdvo; 500 } else if (IS_CHERRYVIEW(dev)) { 501 limit = &intel_limits_chv; 502 } else if (IS_VALLEYVIEW(dev)) { 503 limit = &intel_limits_vlv; 504 } else if (!IS_GEN2(dev)) { 505 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) 506 limit = &intel_limits_i9xx_lvds; 507 else 508 limit = &intel_limits_i9xx_sdvo; 509 } else { 510 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) 511 limit = &intel_limits_i8xx_lvds; 512 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO)) 513 limit = &intel_limits_i8xx_dvo; 514 else 515 limit = &intel_limits_i8xx_dac; 516 } 517 return limit; 518 } 519 520 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 521 static void pineview_clock(int refclk, intel_clock_t *clock) 522 { 523 clock->m = clock->m2 + 2; 524 clock->p = clock->p1 * clock->p2; 525 if (WARN_ON(clock->n == 0 || clock->p == 0)) 526 return; 527 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 528 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 529 } 530 531 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 532 { 533 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 534 } 535 536 static void i9xx_clock(int refclk, intel_clock_t *clock) 537 { 538 clock->m = i9xx_dpll_compute_m(clock); 539 clock->p = clock->p1 * clock->p2; 540 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 541 return; 542 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 543 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 544 } 545 546 static void chv_clock(int refclk, intel_clock_t *clock) 547 { 548 clock->m = clock->m1 * clock->m2; 549 clock->p = clock->p1 * clock->p2; 550 if (WARN_ON(clock->n == 0 || clock->p == 0)) 551 return; 552 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 553 clock->n << 22); 
554 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 555 } 556 557 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 558 /** 559 * Returns whether the given set of divisors are valid for a given refclk with 560 * the given connectors. 561 */ 562 563 static bool intel_PLL_is_valid(struct drm_device *dev, 564 const intel_limit_t *limit, 565 const intel_clock_t *clock) 566 { 567 if (clock->n < limit->n.min || limit->n.max < clock->n) 568 INTELPllInvalid("n out of range\n"); 569 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 570 INTELPllInvalid("p1 out of range\n"); 571 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 572 INTELPllInvalid("m2 out of range\n"); 573 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 574 INTELPllInvalid("m1 out of range\n"); 575 576 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev)) 577 if (clock->m1 <= clock->m2) 578 INTELPllInvalid("m1 <= m2\n"); 579 580 if (!IS_VALLEYVIEW(dev)) { 581 if (clock->p < limit->p.min || limit->p.max < clock->p) 582 INTELPllInvalid("p out of range\n"); 583 if (clock->m < limit->m.min || limit->m.max < clock->m) 584 INTELPllInvalid("m out of range\n"); 585 } 586 587 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 588 INTELPllInvalid("vco out of range\n"); 589 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 590 * connector, etc., rather than just a single range. 
591 */ 592 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 593 INTELPllInvalid("dot out of range\n"); 594 595 return true; 596 } 597 598 static bool 599 i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 600 int target, int refclk, intel_clock_t *match_clock, 601 intel_clock_t *best_clock) 602 { 603 struct drm_device *dev = crtc->base.dev; 604 intel_clock_t clock; 605 int err = target; 606 607 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 608 /* 609 * For LVDS just rely on its current settings for dual-channel. 610 * We haven't figured out how to reliably set up different 611 * single/dual channel state, if we even can. 612 */ 613 if (intel_is_dual_link_lvds(dev)) 614 clock.p2 = limit->p2.p2_fast; 615 else 616 clock.p2 = limit->p2.p2_slow; 617 } else { 618 if (target < limit->p2.dot_limit) 619 clock.p2 = limit->p2.p2_slow; 620 else 621 clock.p2 = limit->p2.p2_fast; 622 } 623 624 memset(best_clock, 0, sizeof(*best_clock)); 625 626 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 627 clock.m1++) { 628 for (clock.m2 = limit->m2.min; 629 clock.m2 <= limit->m2.max; clock.m2++) { 630 if (clock.m2 >= clock.m1) 631 break; 632 for (clock.n = limit->n.min; 633 clock.n <= limit->n.max; clock.n++) { 634 for (clock.p1 = limit->p1.min; 635 clock.p1 <= limit->p1.max; clock.p1++) { 636 int this_err; 637 638 i9xx_clock(refclk, &clock); 639 if (!intel_PLL_is_valid(dev, limit, 640 &clock)) 641 continue; 642 if (match_clock && 643 clock.p != match_clock->p) 644 continue; 645 646 this_err = abs(clock.dot - target); 647 if (this_err < err) { 648 *best_clock = clock; 649 err = this_err; 650 } 651 } 652 } 653 } 654 } 655 656 return (err != target); 657 } 658 659 static bool 660 pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 661 int target, int refclk, intel_clock_t *match_clock, 662 intel_clock_t *best_clock) 663 { 664 struct drm_device *dev = crtc->base.dev; 665 intel_clock_t clock; 666 int err = target; 
667 668 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 669 /* 670 * For LVDS just rely on its current settings for dual-channel. 671 * We haven't figured out how to reliably set up different 672 * single/dual channel state, if we even can. 673 */ 674 if (intel_is_dual_link_lvds(dev)) 675 clock.p2 = limit->p2.p2_fast; 676 else 677 clock.p2 = limit->p2.p2_slow; 678 } else { 679 if (target < limit->p2.dot_limit) 680 clock.p2 = limit->p2.p2_slow; 681 else 682 clock.p2 = limit->p2.p2_fast; 683 } 684 685 memset(best_clock, 0, sizeof(*best_clock)); 686 687 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 688 clock.m1++) { 689 for (clock.m2 = limit->m2.min; 690 clock.m2 <= limit->m2.max; clock.m2++) { 691 for (clock.n = limit->n.min; 692 clock.n <= limit->n.max; clock.n++) { 693 for (clock.p1 = limit->p1.min; 694 clock.p1 <= limit->p1.max; clock.p1++) { 695 int this_err; 696 697 pineview_clock(refclk, &clock); 698 if (!intel_PLL_is_valid(dev, limit, 699 &clock)) 700 continue; 701 if (match_clock && 702 clock.p != match_clock->p) 703 continue; 704 705 this_err = abs(clock.dot - target); 706 if (this_err < err) { 707 *best_clock = clock; 708 err = this_err; 709 } 710 } 711 } 712 } 713 } 714 715 return (err != target); 716 } 717 718 static bool 719 g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 720 int target, int refclk, intel_clock_t *match_clock, 721 intel_clock_t *best_clock) 722 { 723 struct drm_device *dev = crtc->base.dev; 724 intel_clock_t clock; 725 int max_n; 726 bool found; 727 /* approximately equals target * 0.00585 */ 728 int err_most = (target >> 8) + (target >> 9); 729 found = false; 730 731 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { 732 if (intel_is_dual_link_lvds(dev)) 733 clock.p2 = limit->p2.p2_fast; 734 else 735 clock.p2 = limit->p2.p2_slow; 736 } else { 737 if (target < limit->p2.dot_limit) 738 clock.p2 = limit->p2.p2_slow; 739 else 740 clock.p2 = limit->p2.p2_fast; 741 } 742 743 
memset(best_clock, 0, sizeof(*best_clock)); 744 max_n = limit->n.max; 745 /* based on hardware requirement, prefer smaller n to precision */ 746 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 747 /* based on hardware requirement, prefere larger m1,m2 */ 748 for (clock.m1 = limit->m1.max; 749 clock.m1 >= limit->m1.min; clock.m1--) { 750 for (clock.m2 = limit->m2.max; 751 clock.m2 >= limit->m2.min; clock.m2--) { 752 for (clock.p1 = limit->p1.max; 753 clock.p1 >= limit->p1.min; clock.p1--) { 754 int this_err; 755 756 i9xx_clock(refclk, &clock); 757 if (!intel_PLL_is_valid(dev, limit, 758 &clock)) 759 continue; 760 761 this_err = abs(clock.dot - target); 762 if (this_err < err_most) { 763 *best_clock = clock; 764 err_most = this_err; 765 max_n = clock.n; 766 found = true; 767 } 768 } 769 } 770 } 771 } 772 return found; 773 } 774 775 static bool 776 vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 777 int target, int refclk, intel_clock_t *match_clock, 778 intel_clock_t *best_clock) 779 { 780 struct drm_device *dev = crtc->base.dev; 781 intel_clock_t clock; 782 unsigned int bestppm = 1000000; 783 /* min update 19.2 MHz */ 784 int max_n = min(limit->n.max, refclk / 19200); 785 bool found = false; 786 787 target *= 5; /* fast clock */ 788 789 memset(best_clock, 0, sizeof(*best_clock)); 790 791 /* based on hardware requirement, prefer smaller n to precision */ 792 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 793 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 794 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 795 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 796 clock.p = clock.p1 * clock.p2; 797 /* based on hardware requirement, prefer bigger m1,m2 values */ 798 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 799 unsigned int ppm, diff; 800 801 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 802 refclk * clock.m1); 803 804 vlv_clock(refclk, &clock); 805 806 if (!intel_PLL_is_valid(dev, limit, 807 &clock)) 808 continue; 809 810 diff = abs(clock.dot - target); 811 ppm = div_u64(1000000ULL * diff, target); 812 813 if (ppm < 100 && clock.p > best_clock->p) { 814 bestppm = 0; 815 *best_clock = clock; 816 found = true; 817 } 818 819 if (bestppm >= 10 && ppm < bestppm - 10) { 820 bestppm = ppm; 821 *best_clock = clock; 822 found = true; 823 } 824 } 825 } 826 } 827 } 828 829 return found; 830 } 831 832 static bool 833 chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, 834 int target, int refclk, intel_clock_t *match_clock, 835 intel_clock_t *best_clock) 836 { 837 struct drm_device *dev = crtc->base.dev; 838 intel_clock_t clock; 839 uint64_t m2; 840 int found = false; 841 842 memset(best_clock, 0, sizeof(*best_clock)); 843 844 /* 845 * Based on hardware doc, the n always set to 1, and m1 always 846 * set to 2. If requires to support 200Mhz refclk, we need to 847 * revisit this because n may not 1 anymore. 848 */ 849 clock.n = 1, clock.m1 = 2; 850 target *= 5; /* fast clock */ 851 852 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 853 for (clock.p2 = limit->p2.p2_fast; 854 clock.p2 >= limit->p2.p2_slow; 855 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 856 857 clock.p = clock.p1 * clock.p2; 858 859 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 860 clock.n) << 22, refclk * clock.m1); 861 862 if (m2 > INT_MAX/clock.m1) 863 continue; 864 865 clock.m2 = m2; 866 867 chv_clock(refclk, &clock); 868 869 if (!intel_PLL_is_valid(dev, limit, &clock)) 870 continue; 871 872 /* based on hardware requirement, prefer bigger p 873 */ 874 if (clock.p > best_clock->p) { 875 *best_clock = clock; 876 found = true; 877 } 878 } 879 } 880 881 return found; 882 } 883 884 bool intel_crtc_active(struct drm_crtc *crtc) 885 { 886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 887 888 /* Be paranoid as we can arrive here with only partial 889 * state retrieved from the hardware during setup. 890 * 891 * We can ditch the adjusted_mode.crtc_clock check as soon 892 * as Haswell has gained clock readout/fastboot support. 893 * 894 * We can ditch the crtc->primary->fb check as soon as we can 895 * properly reconstruct framebuffers. 896 */ 897 return intel_crtc->active && crtc->primary->fb && 898 intel_crtc->config->base.adjusted_mode.crtc_clock; 899 } 900 901 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 902 enum i915_pipe pipe) 903 { 904 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 905 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 906 907 return intel_crtc->config->cpu_transcoder; 908 } 909 910 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe) 911 { 912 struct drm_i915_private *dev_priv = dev->dev_private; 913 u32 reg = PIPEDSL(pipe); 914 u32 line1, line2; 915 u32 line_mask; 916 917 if (IS_GEN2(dev)) 918 line_mask = DSL_LINEMASK_GEN2; 919 else 920 line_mask = DSL_LINEMASK_GEN3; 921 922 line1 = I915_READ(reg) & line_mask; 923 mdelay(5); 924 line2 = I915_READ(reg) & line_mask; 925 926 return line1 == line2; 927 } 928 929 /* 930 * intel_wait_for_pipe_off - wait for pipe to turn off 931 * @crtc: crtc whose pipe to wait for 932 * 933 * After 
disabling a pipe, we can't wait for vblank in the usual way, 934 * spinning on the vblank interrupt status bit, since we won't actually 935 * see an interrupt when the pipe is disabled. 936 * 937 * On Gen4 and above: 938 * wait for the pipe register state bit to turn off 939 * 940 * Otherwise: 941 * wait for the display line value to settle (it usually 942 * ends up stopping at the start of the next frame). 943 * 944 */ 945 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 946 { 947 struct drm_device *dev = crtc->base.dev; 948 struct drm_i915_private *dev_priv = dev->dev_private; 949 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 950 enum i915_pipe pipe = crtc->pipe; 951 952 if (INTEL_INFO(dev)->gen >= 4) { 953 int reg = PIPECONF(cpu_transcoder); 954 955 /* Wait for the Pipe State to go off */ 956 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 957 100)) 958 WARN(1, "pipe_off wait timed out\n"); 959 } else { 960 /* Wait for the display line to settle */ 961 if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) 962 WARN(1, "pipe_off wait timed out\n"); 963 } 964 } 965 966 /* 967 * ibx_digital_port_connected - is the specified port connected? 968 * @dev_priv: i915 private structure 969 * @port: the port to test 970 * 971 * Returns true if @port is connected, false otherwise. 
972 */ 973 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 974 struct intel_digital_port *port) 975 { 976 u32 bit; 977 978 if (HAS_PCH_IBX(dev_priv->dev)) { 979 switch (port->port) { 980 case PORT_B: 981 bit = SDE_PORTB_HOTPLUG; 982 break; 983 case PORT_C: 984 bit = SDE_PORTC_HOTPLUG; 985 break; 986 case PORT_D: 987 bit = SDE_PORTD_HOTPLUG; 988 break; 989 default: 990 return true; 991 } 992 } else { 993 switch (port->port) { 994 case PORT_B: 995 bit = SDE_PORTB_HOTPLUG_CPT; 996 break; 997 case PORT_C: 998 bit = SDE_PORTC_HOTPLUG_CPT; 999 break; 1000 case PORT_D: 1001 bit = SDE_PORTD_HOTPLUG_CPT; 1002 break; 1003 default: 1004 return true; 1005 } 1006 } 1007 1008 return I915_READ(SDEISR) & bit; 1009 } 1010 1011 static const char *state_string(bool enabled) 1012 { 1013 return enabled ? "on" : "off"; 1014 } 1015 1016 /* Only for pre-ILK configs */ 1017 void assert_pll(struct drm_i915_private *dev_priv, 1018 enum i915_pipe pipe, bool state) 1019 { 1020 int reg; 1021 u32 val; 1022 bool cur_state; 1023 1024 reg = DPLL(pipe); 1025 val = I915_READ(reg); 1026 cur_state = !!(val & DPLL_VCO_ENABLE); 1027 I915_STATE_WARN(cur_state != state, 1028 "PLL state assertion failure (expected %s, current %s)\n", 1029 state_string(state), state_string(cur_state)); 1030 } 1031 1032 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1033 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1034 { 1035 u32 val; 1036 bool cur_state; 1037 1038 mutex_lock(&dev_priv->dpio_lock); 1039 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1040 mutex_unlock(&dev_priv->dpio_lock); 1041 1042 cur_state = val & DSI_PLL_VCO_EN; 1043 I915_STATE_WARN(cur_state != state, 1044 "DSI PLL state assertion failure (expected %s, current %s)\n", 1045 state_string(state), state_string(cur_state)); 1046 } 1047 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) 1048 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) 1049 1050 struct intel_shared_dpll * 
1051 intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 1052 { 1053 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1054 1055 if (crtc->config->shared_dpll < 0) 1056 return NULL; 1057 1058 return &dev_priv->shared_dplls[crtc->config->shared_dpll]; 1059 } 1060 1061 /* For ILK+ */ 1062 void assert_shared_dpll(struct drm_i915_private *dev_priv, 1063 struct intel_shared_dpll *pll, 1064 bool state) 1065 { 1066 bool cur_state; 1067 struct intel_dpll_hw_state hw_state; 1068 1069 if (WARN (!pll, 1070 "asserting DPLL %s with no DPLL\n", state_string(state))) 1071 return; 1072 1073 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); 1074 I915_STATE_WARN(cur_state != state, 1075 "%s assertion failure (expected %s, current %s)\n", 1076 pll->name, state_string(state), state_string(cur_state)); 1077 } 1078 1079 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1080 enum i915_pipe pipe, bool state) 1081 { 1082 int reg; 1083 u32 val; 1084 bool cur_state; 1085 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1086 pipe); 1087 1088 if (HAS_DDI(dev_priv->dev)) { 1089 /* DDI does not have a specific FDI_TX register */ 1090 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1091 val = I915_READ(reg); 1092 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1093 } else { 1094 reg = FDI_TX_CTL(pipe); 1095 val = I915_READ(reg); 1096 cur_state = !!(val & FDI_TX_ENABLE); 1097 } 1098 I915_STATE_WARN(cur_state != state, 1099 "FDI TX state assertion failure (expected %s, current %s)\n", 1100 state_string(state), state_string(cur_state)); 1101 } 1102 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1103 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1104 1105 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1106 enum i915_pipe pipe, bool state) 1107 { 1108 int reg; 1109 u32 val; 1110 bool cur_state; 1111 1112 reg = FDI_RX_CTL(pipe); 1113 val = I915_READ(reg); 1114 cur_state = !!(val & FDI_RX_ENABLE); 1115 
I915_STATE_WARN(cur_state != state, 1116 "FDI RX state assertion failure (expected %s, current %s)\n", 1117 state_string(state), state_string(cur_state)); 1118 } 1119 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1120 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1121 1122 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1123 enum i915_pipe pipe) 1124 { 1125 int reg; 1126 u32 val; 1127 1128 /* ILK FDI PLL is always enabled */ 1129 if (INTEL_INFO(dev_priv->dev)->gen == 5) 1130 return; 1131 1132 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1133 if (HAS_DDI(dev_priv->dev)) 1134 return; 1135 1136 reg = FDI_TX_CTL(pipe); 1137 val = I915_READ(reg); 1138 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1139 } 1140 1141 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1142 enum i915_pipe pipe, bool state) 1143 { 1144 int reg; 1145 u32 val; 1146 bool cur_state; 1147 1148 reg = FDI_RX_CTL(pipe); 1149 val = I915_READ(reg); 1150 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1151 I915_STATE_WARN(cur_state != state, 1152 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1153 state_string(state), state_string(cur_state)); 1154 } 1155 1156 void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1157 enum i915_pipe pipe) 1158 { 1159 struct drm_device *dev = dev_priv->dev; 1160 int pp_reg; 1161 u32 val; 1162 enum i915_pipe panel_pipe = PIPE_A; 1163 bool locked = true; 1164 1165 if (WARN_ON(HAS_DDI(dev))) 1166 return; 1167 1168 if (HAS_PCH_SPLIT(dev)) { 1169 u32 port_sel; 1170 1171 pp_reg = PCH_PP_CONTROL; 1172 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; 1173 1174 if (port_sel == PANEL_PORT_SELECT_LVDS && 1175 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1176 panel_pipe = PIPE_B; 1177 /* XXX: else fix for eDP */ 1178 } else if (IS_VALLEYVIEW(dev)) { 1179 /* presumably write lock depends on pipe, not port 
select */ 1180 pp_reg = VLV_PIPE_PP_CONTROL(pipe); 1181 panel_pipe = pipe; 1182 } else { 1183 pp_reg = PP_CONTROL; 1184 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1185 panel_pipe = PIPE_B; 1186 } 1187 1188 val = I915_READ(pp_reg); 1189 if (!(val & PANEL_POWER_ON) || 1190 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1191 locked = false; 1192 1193 I915_STATE_WARN(panel_pipe == pipe && locked, 1194 "panel assertion failure, pipe %c regs locked\n", 1195 pipe_name(pipe)); 1196 } 1197 1198 static void assert_cursor(struct drm_i915_private *dev_priv, 1199 enum i915_pipe pipe, bool state) 1200 { 1201 struct drm_device *dev = dev_priv->dev; 1202 bool cur_state; 1203 1204 if (IS_845G(dev) || IS_I865G(dev)) 1205 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1206 else 1207 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1208 1209 I915_STATE_WARN(cur_state != state, 1210 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1211 pipe_name(pipe), state_string(state), state_string(cur_state)); 1212 } 1213 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1214 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1215 1216 void assert_pipe(struct drm_i915_private *dev_priv, 1217 enum i915_pipe pipe, bool state) 1218 { 1219 int reg; 1220 u32 val; 1221 bool cur_state; 1222 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1223 pipe); 1224 1225 /* if we need the pipe quirk it must be always on */ 1226 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1227 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1228 state = true; 1229 1230 if (!intel_display_power_is_enabled(dev_priv, 1231 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1232 cur_state = false; 1233 } else { 1234 reg = PIPECONF(cpu_transcoder); 1235 val = I915_READ(reg); 1236 cur_state = !!(val & PIPECONF_ENABLE); 1237 } 1238 1239 I915_STATE_WARN(cur_state != state, 1240 "pipe %c assertion failure (expected %s, current %s)\n", 1241 
pipe_name(pipe), state_string(state), state_string(cur_state)); 1242 } 1243 1244 static void assert_plane(struct drm_i915_private *dev_priv, 1245 enum plane plane, bool state) 1246 { 1247 int reg; 1248 u32 val; 1249 bool cur_state; 1250 1251 reg = DSPCNTR(plane); 1252 val = I915_READ(reg); 1253 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1254 I915_STATE_WARN(cur_state != state, 1255 "plane %c assertion failure (expected %s, current %s)\n", 1256 plane_name(plane), state_string(state), state_string(cur_state)); 1257 } 1258 1259 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1260 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1261 1262 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1263 enum i915_pipe pipe) 1264 { 1265 struct drm_device *dev = dev_priv->dev; 1266 int reg, i; 1267 u32 val; 1268 int cur_pipe; 1269 1270 /* Primary planes are fixed to pipes on gen4+ */ 1271 if (INTEL_INFO(dev)->gen >= 4) { 1272 reg = DSPCNTR(pipe); 1273 val = I915_READ(reg); 1274 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1275 "plane %c assertion failure, should be disabled but not\n", 1276 plane_name(pipe)); 1277 return; 1278 } 1279 1280 /* Need to check both planes against the pipe */ 1281 for_each_pipe(dev_priv, i) { 1282 reg = DSPCNTR(i); 1283 val = I915_READ(reg); 1284 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1285 DISPPLANE_SEL_PIPE_SHIFT; 1286 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1287 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1288 plane_name(i), pipe_name(pipe)); 1289 } 1290 } 1291 1292 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1293 enum i915_pipe pipe) 1294 { 1295 struct drm_device *dev = dev_priv->dev; 1296 int reg, sprite; 1297 u32 val; 1298 1299 if (INTEL_INFO(dev)->gen >= 9) { 1300 for_each_sprite(pipe, sprite) { 1301 val = I915_READ(PLANE_CTL(pipe, sprite)); 1302 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1303 "plane %d assertion 
failure, should be off on pipe %c but is still active\n", 1304 sprite, pipe_name(pipe)); 1305 } 1306 } else if (IS_VALLEYVIEW(dev)) { 1307 for_each_sprite(pipe, sprite) { 1308 reg = SPCNTR(pipe, sprite); 1309 val = I915_READ(reg); 1310 I915_STATE_WARN(val & SP_ENABLE, 1311 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1312 sprite_name(pipe, sprite), pipe_name(pipe)); 1313 } 1314 } else if (INTEL_INFO(dev)->gen >= 7) { 1315 reg = SPRCTL(pipe); 1316 val = I915_READ(reg); 1317 I915_STATE_WARN(val & SPRITE_ENABLE, 1318 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1319 plane_name(pipe), pipe_name(pipe)); 1320 } else if (INTEL_INFO(dev)->gen >= 5) { 1321 reg = DVSCNTR(pipe); 1322 val = I915_READ(reg); 1323 I915_STATE_WARN(val & DVS_ENABLE, 1324 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1325 plane_name(pipe), pipe_name(pipe)); 1326 } 1327 } 1328 1329 static void assert_vblank_disabled(struct drm_crtc *crtc) 1330 { 1331 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1332 drm_crtc_vblank_put(crtc); 1333 } 1334 1335 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1336 { 1337 u32 val; 1338 bool enabled; 1339 1340 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); 1341 1342 val = I915_READ(PCH_DREF_CONTROL); 1343 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1344 DREF_SUPERSPREAD_SOURCE_MASK)); 1345 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1346 } 1347 1348 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1349 enum i915_pipe pipe) 1350 { 1351 int reg; 1352 u32 val; 1353 bool enabled; 1354 1355 reg = PCH_TRANSCONF(pipe); 1356 val = I915_READ(reg); 1357 enabled = !!(val & TRANS_ENABLE); 1358 I915_STATE_WARN(enabled, 1359 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1360 
pipe_name(pipe)); 1361 } 1362 1363 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1364 enum i915_pipe pipe, u32 port_sel, u32 val) 1365 { 1366 if ((val & DP_PORT_EN) == 0) 1367 return false; 1368 1369 if (HAS_PCH_CPT(dev_priv->dev)) { 1370 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1371 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1372 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1373 return false; 1374 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1375 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1376 return false; 1377 } else { 1378 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1379 return false; 1380 } 1381 return true; 1382 } 1383 1384 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1385 enum i915_pipe pipe, u32 val) 1386 { 1387 if ((val & SDVO_ENABLE) == 0) 1388 return false; 1389 1390 if (HAS_PCH_CPT(dev_priv->dev)) { 1391 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1392 return false; 1393 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1394 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1395 return false; 1396 } else { 1397 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1398 return false; 1399 } 1400 return true; 1401 } 1402 1403 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1404 enum i915_pipe pipe, u32 val) 1405 { 1406 if ((val & LVDS_PORT_EN) == 0) 1407 return false; 1408 1409 if (HAS_PCH_CPT(dev_priv->dev)) { 1410 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1411 return false; 1412 } else { 1413 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1414 return false; 1415 } 1416 return true; 1417 } 1418 1419 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1420 enum i915_pipe pipe, u32 val) 1421 { 1422 if ((val & ADPA_DAC_ENABLE) == 0) 1423 return false; 1424 if (HAS_PCH_CPT(dev_priv->dev)) { 1425 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1426 return false; 1427 } else { 1428 if ((val & 
ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1429 return false; 1430 } 1431 return true; 1432 } 1433 1434 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1435 enum i915_pipe pipe, int reg, u32 port_sel) 1436 { 1437 u32 val = I915_READ(reg); 1438 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1439 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1440 reg, pipe_name(pipe)); 1441 1442 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1443 && (val & DP_PIPEB_SELECT), 1444 "IBX PCH dp port still using transcoder B\n"); 1445 } 1446 1447 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1448 enum i915_pipe pipe, int reg) 1449 { 1450 u32 val = I915_READ(reg); 1451 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1452 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1453 reg, pipe_name(pipe)); 1454 1455 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1456 && (val & SDVO_PIPE_B_SELECT), 1457 "IBX PCH hdmi port still using transcoder B\n"); 1458 } 1459 1460 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1461 enum i915_pipe pipe) 1462 { 1463 int reg; 1464 u32 val; 1465 1466 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1467 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1468 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1469 1470 reg = PCH_ADPA; 1471 val = I915_READ(reg); 1472 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1473 "PCH VGA enabled on transcoder %c, should be disabled\n", 1474 pipe_name(pipe)); 1475 1476 reg = PCH_LVDS; 1477 val = I915_READ(reg); 1478 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1479 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1480 pipe_name(pipe)); 1481 1482 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1483 assert_pch_hdmi_disabled(dev_priv, pipe, 
PCH_HDMIC); 1484 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1485 } 1486 1487 static void intel_init_dpio(struct drm_device *dev) 1488 { 1489 struct drm_i915_private *dev_priv = dev->dev_private; 1490 1491 if (!IS_VALLEYVIEW(dev)) 1492 return; 1493 1494 /* 1495 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 1496 * CHV x1 PHY (DP/HDMI D) 1497 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 1498 */ 1499 if (IS_CHERRYVIEW(dev)) { 1500 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 1501 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 1502 } else { 1503 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1504 } 1505 } 1506 1507 static void vlv_enable_pll(struct intel_crtc *crtc, 1508 const struct intel_crtc_state *pipe_config) 1509 { 1510 struct drm_device *dev = crtc->base.dev; 1511 struct drm_i915_private *dev_priv = dev->dev_private; 1512 int reg = DPLL(crtc->pipe); 1513 u32 dpll = pipe_config->dpll_hw_state.dpll; 1514 1515 assert_pipe_disabled(dev_priv, crtc->pipe); 1516 1517 /* No really, not for ILK+ */ 1518 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1519 1520 /* PLL is protected by panel, make sure we can write it */ 1521 if (IS_MOBILE(dev_priv->dev)) 1522 assert_panel_unlocked(dev_priv, crtc->pipe); 1523 1524 I915_WRITE(reg, dpll); 1525 POSTING_READ(reg); 1526 udelay(150); 1527 1528 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1529 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1530 1531 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md); 1532 POSTING_READ(DPLL_MD(crtc->pipe)); 1533 1534 /* We do this three times for luck */ 1535 I915_WRITE(reg, dpll); 1536 POSTING_READ(reg); 1537 udelay(150); /* wait for warmup */ 1538 I915_WRITE(reg, dpll); 1539 POSTING_READ(reg); 1540 udelay(150); /* wait for warmup */ 1541 I915_WRITE(reg, dpll); 1542 POSTING_READ(reg); 1543 udelay(150); /* wait for warmup */ 1544 } 1545 1546 static void chv_enable_pll(struct intel_crtc *crtc, 1547 const struct 
intel_crtc_state *pipe_config) 1548 { 1549 struct drm_device *dev = crtc->base.dev; 1550 struct drm_i915_private *dev_priv = dev->dev_private; 1551 int pipe = crtc->pipe; 1552 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1553 u32 tmp; 1554 1555 assert_pipe_disabled(dev_priv, crtc->pipe); 1556 1557 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); 1558 1559 mutex_lock(&dev_priv->dpio_lock); 1560 1561 /* Enable back the 10bit clock to display controller */ 1562 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1563 tmp |= DPIO_DCLKP_EN; 1564 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1565 1566 /* 1567 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 1568 */ 1569 udelay(1); 1570 1571 /* Enable PLL */ 1572 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1573 1574 /* Check PLL is locked */ 1575 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1576 DRM_ERROR("PLL %d failed to lock\n", pipe); 1577 1578 /* not sure when this should be written */ 1579 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1580 POSTING_READ(DPLL_MD(pipe)); 1581 1582 mutex_unlock(&dev_priv->dpio_lock); 1583 } 1584 1585 static int intel_num_dvo_pipes(struct drm_device *dev) 1586 { 1587 struct intel_crtc *crtc; 1588 int count = 0; 1589 1590 for_each_intel_crtc(dev, crtc) 1591 count += crtc->active && 1592 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1593 1594 return count; 1595 } 1596 1597 static void i9xx_enable_pll(struct intel_crtc *crtc) 1598 { 1599 struct drm_device *dev = crtc->base.dev; 1600 struct drm_i915_private *dev_priv = dev->dev_private; 1601 int reg = DPLL(crtc->pipe); 1602 u32 dpll = crtc->config->dpll_hw_state.dpll; 1603 1604 assert_pipe_disabled(dev_priv, crtc->pipe); 1605 1606 /* No really, not for ILK+ */ 1607 BUG_ON(INTEL_INFO(dev)->gen >= 5); 1608 1609 /* PLL is protected by panel, make sure we can write it */ 1610 if (IS_MOBILE(dev) && !IS_I830(dev)) 1611 assert_panel_unlocked(dev_priv, 
crtc->pipe); 1612 1613 /* Enable DVO 2x clock on both PLLs if necessary */ 1614 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { 1615 /* 1616 * It appears to be important that we don't enable this 1617 * for the current pipe before otherwise configuring the 1618 * PLL. No idea how this should be handled if multiple 1619 * DVO outputs are enabled simultaneosly. 1620 */ 1621 dpll |= DPLL_DVO_2X_MODE; 1622 I915_WRITE(DPLL(!crtc->pipe), 1623 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1624 } 1625 1626 /* Wait for the clocks to stabilize. */ 1627 POSTING_READ(reg); 1628 udelay(150); 1629 1630 if (INTEL_INFO(dev)->gen >= 4) { 1631 I915_WRITE(DPLL_MD(crtc->pipe), 1632 crtc->config->dpll_hw_state.dpll_md); 1633 } else { 1634 /* The pixel multiplier can only be updated once the 1635 * DPLL is enabled and the clocks are stable. 1636 * 1637 * So write it again. 1638 */ 1639 I915_WRITE(reg, dpll); 1640 } 1641 1642 /* We do this three times for luck */ 1643 I915_WRITE(reg, dpll); 1644 POSTING_READ(reg); 1645 udelay(150); /* wait for warmup */ 1646 I915_WRITE(reg, dpll); 1647 POSTING_READ(reg); 1648 udelay(150); /* wait for warmup */ 1649 I915_WRITE(reg, dpll); 1650 POSTING_READ(reg); 1651 udelay(150); /* wait for warmup */ 1652 } 1653 1654 /** 1655 * i9xx_disable_pll - disable a PLL 1656 * @dev_priv: i915 private structure 1657 * @pipe: pipe PLL to disable 1658 * 1659 * Disable the PLL for @pipe, making sure the pipe is off first. 1660 * 1661 * Note! This is for pre-ILK only. 
1662 */ 1663 static void i9xx_disable_pll(struct intel_crtc *crtc) 1664 { 1665 struct drm_device *dev = crtc->base.dev; 1666 struct drm_i915_private *dev_priv = dev->dev_private; 1667 enum i915_pipe pipe = crtc->pipe; 1668 1669 /* Disable DVO 2x clock on both PLLs if necessary */ 1670 if (IS_I830(dev) && 1671 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1672 intel_num_dvo_pipes(dev) == 1) { 1673 I915_WRITE(DPLL(PIPE_B), 1674 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1675 I915_WRITE(DPLL(PIPE_A), 1676 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1677 } 1678 1679 /* Don't disable pipe or pipe PLLs if needed */ 1680 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1681 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1682 return; 1683 1684 /* Make sure the pipe isn't still relying on us */ 1685 assert_pipe_disabled(dev_priv, pipe); 1686 1687 I915_WRITE(DPLL(pipe), 0); 1688 POSTING_READ(DPLL(pipe)); 1689 } 1690 1691 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1692 { 1693 u32 val = 0; 1694 1695 /* Make sure the pipe isn't still relying on us */ 1696 assert_pipe_disabled(dev_priv, pipe); 1697 1698 /* 1699 * Leave integrated clock source and reference clock enabled for pipe B. 1700 * The latter is needed for VGA hotplug / manual detection. 
1701 */ 1702 if (pipe == PIPE_B) 1703 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; 1704 I915_WRITE(DPLL(pipe), val); 1705 POSTING_READ(DPLL(pipe)); 1706 1707 } 1708 1709 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1710 { 1711 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1712 u32 val; 1713 1714 /* Make sure the pipe isn't still relying on us */ 1715 assert_pipe_disabled(dev_priv, pipe); 1716 1717 /* Set PLL en = 0 */ 1718 val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV; 1719 if (pipe != PIPE_A) 1720 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1721 I915_WRITE(DPLL(pipe), val); 1722 POSTING_READ(DPLL(pipe)); 1723 1724 mutex_lock(&dev_priv->dpio_lock); 1725 1726 /* Disable 10bit clock to display controller */ 1727 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1728 val &= ~DPIO_DCLKP_EN; 1729 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1730 1731 /* disable left/right clock distribution */ 1732 if (pipe != PIPE_B) { 1733 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1734 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 1735 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 1736 } else { 1737 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 1738 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 1739 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 1740 } 1741 1742 mutex_unlock(&dev_priv->dpio_lock); 1743 } 1744 1745 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1746 struct intel_digital_port *dport) 1747 { 1748 u32 port_mask; 1749 int dpll_reg; 1750 1751 switch (dport->port) { 1752 case PORT_B: 1753 port_mask = DPLL_PORTB_READY_MASK; 1754 dpll_reg = DPLL(0); 1755 break; 1756 case PORT_C: 1757 port_mask = DPLL_PORTC_READY_MASK; 1758 dpll_reg = DPLL(0); 1759 break; 1760 case PORT_D: 1761 port_mask = DPLL_PORTD_READY_MASK; 1762 dpll_reg = DPIO_PHY_STATUS; 1763 break; 1764 default: 1765 BUG(); 1766 } 1767 1768 if 
(wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000)) 1769 WARN(1, "timed out waiting for port %c ready: 0x%08x\n", 1770 port_name(dport->port), I915_READ(dpll_reg)); 1771 } 1772 1773 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1774 { 1775 struct drm_device *dev = crtc->base.dev; 1776 struct drm_i915_private *dev_priv = dev->dev_private; 1777 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1778 1779 if (WARN_ON(pll == NULL)) 1780 return; 1781 1782 WARN_ON(!pll->config.crtc_mask); 1783 if (pll->active == 0) { 1784 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1785 WARN_ON(pll->on); 1786 assert_shared_dpll_disabled(dev_priv, pll); 1787 1788 pll->mode_set(dev_priv, pll); 1789 } 1790 } 1791 1792 /** 1793 * intel_enable_shared_dpll - enable PCH PLL 1794 * @dev_priv: i915 private structure 1795 * @pipe: pipe PLL to enable 1796 * 1797 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1798 * drives the transcoder clock. 1799 */ 1800 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1801 { 1802 struct drm_device *dev = crtc->base.dev; 1803 struct drm_i915_private *dev_priv = dev->dev_private; 1804 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1805 1806 if (WARN_ON(pll == NULL)) 1807 return; 1808 1809 if (WARN_ON(pll->config.crtc_mask == 0)) 1810 return; 1811 1812 DRM_DEBUG_KMS("enable %s (active %d, on? 
%d) for crtc %d\n", 1813 pll->name, pll->active, pll->on, 1814 crtc->base.base.id); 1815 1816 if (pll->active++) { 1817 WARN_ON(!pll->on); 1818 assert_shared_dpll_enabled(dev_priv, pll); 1819 return; 1820 } 1821 WARN_ON(pll->on); 1822 1823 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1824 1825 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1826 pll->enable(dev_priv, pll); 1827 pll->on = true; 1828 } 1829 1830 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1831 { 1832 struct drm_device *dev = crtc->base.dev; 1833 struct drm_i915_private *dev_priv = dev->dev_private; 1834 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1835 1836 /* PCH only available on ILK+ */ 1837 BUG_ON(INTEL_INFO(dev)->gen < 5); 1838 if (WARN_ON(pll == NULL)) 1839 return; 1840 1841 if (WARN_ON(pll->config.crtc_mask == 0)) 1842 return; 1843 1844 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1845 pll->name, pll->active, pll->on, 1846 crtc->base.base.id); 1847 1848 if (WARN_ON(pll->active == 0)) { 1849 assert_shared_dpll_disabled(dev_priv, pll); 1850 return; 1851 } 1852 1853 assert_shared_dpll_enabled(dev_priv, pll); 1854 WARN_ON(!pll->on); 1855 if (--pll->active) 1856 return; 1857 1858 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1859 pll->disable(dev_priv, pll); 1860 pll->on = false; 1861 1862 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1863 } 1864 1865 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1866 enum i915_pipe pipe) 1867 { 1868 struct drm_device *dev = dev_priv->dev; 1869 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1870 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1871 uint32_t reg, val, pipeconf_val; 1872 1873 /* PCH only available on ILK+ */ 1874 BUG_ON(!HAS_PCH_SPLIT(dev)); 1875 1876 /* Make sure PCH DPLL is enabled */ 1877 assert_shared_dpll_enabled(dev_priv, 1878 intel_crtc_to_shared_dpll(intel_crtc)); 1879 1880 /* FDI must be feeding us bits for PCH ports */ 1881 
assert_fdi_tx_enabled(dev_priv, pipe); 1882 assert_fdi_rx_enabled(dev_priv, pipe); 1883 1884 if (HAS_PCH_CPT(dev)) { 1885 /* Workaround: Set the timing override bit before enabling the 1886 * pch transcoder. */ 1887 reg = TRANS_CHICKEN2(pipe); 1888 val = I915_READ(reg); 1889 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1890 I915_WRITE(reg, val); 1891 } 1892 1893 reg = PCH_TRANSCONF(pipe); 1894 val = I915_READ(reg); 1895 pipeconf_val = I915_READ(PIPECONF(pipe)); 1896 1897 if (HAS_PCH_IBX(dev_priv->dev)) { 1898 /* 1899 * make the BPC in transcoder be consistent with 1900 * that in pipeconf reg. 1901 */ 1902 val &= ~PIPECONF_BPC_MASK; 1903 val |= pipeconf_val & PIPECONF_BPC_MASK; 1904 } 1905 1906 val &= ~TRANS_INTERLACE_MASK; 1907 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1908 if (HAS_PCH_IBX(dev_priv->dev) && 1909 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 1910 val |= TRANS_LEGACY_INTERLACED_ILK; 1911 else 1912 val |= TRANS_INTERLACED; 1913 else 1914 val |= TRANS_PROGRESSIVE; 1915 1916 I915_WRITE(reg, val | TRANS_ENABLE); 1917 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1918 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1919 } 1920 1921 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1922 enum transcoder cpu_transcoder) 1923 { 1924 u32 val, pipeconf_val; 1925 1926 /* PCH only available on ILK+ */ 1927 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); 1928 1929 /* FDI must be feeding us bits for PCH ports */ 1930 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1931 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1932 1933 /* Workaround: set timing override bit. 
*/ 1934 val = I915_READ(_TRANSA_CHICKEN2); 1935 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1936 I915_WRITE(_TRANSA_CHICKEN2, val); 1937 1938 val = TRANS_ENABLE; 1939 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1940 1941 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1942 PIPECONF_INTERLACED_ILK) 1943 val |= TRANS_INTERLACED; 1944 else 1945 val |= TRANS_PROGRESSIVE; 1946 1947 I915_WRITE(LPT_TRANSCONF, val); 1948 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 1949 DRM_ERROR("Failed to enable PCH transcoder\n"); 1950 } 1951 1952 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1953 enum i915_pipe pipe) 1954 { 1955 struct drm_device *dev = dev_priv->dev; 1956 uint32_t reg, val; 1957 1958 /* FDI relies on the transcoder */ 1959 assert_fdi_tx_disabled(dev_priv, pipe); 1960 assert_fdi_rx_disabled(dev_priv, pipe); 1961 1962 /* Ports must be off as well */ 1963 assert_pch_ports_disabled(dev_priv, pipe); 1964 1965 reg = PCH_TRANSCONF(pipe); 1966 val = I915_READ(reg); 1967 val &= ~TRANS_ENABLE; 1968 I915_WRITE(reg, val); 1969 /* wait for PCH transcoder off, transcoder state */ 1970 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1971 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1972 1973 if (!HAS_PCH_IBX(dev)) { 1974 /* Workaround: Clear the timing override chicken bit again. */ 1975 reg = TRANS_CHICKEN2(pipe); 1976 val = I915_READ(reg); 1977 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1978 I915_WRITE(reg, val); 1979 } 1980 } 1981 1982 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1983 { 1984 u32 val; 1985 1986 val = I915_READ(LPT_TRANSCONF); 1987 val &= ~TRANS_ENABLE; 1988 I915_WRITE(LPT_TRANSCONF, val); 1989 /* wait for PCH transcoder off, transcoder state */ 1990 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 1991 DRM_ERROR("Failed to disable PCH transcoder\n"); 1992 1993 /* Workaround: clear timing override bit. 
*/ 1994 val = I915_READ(_TRANSA_CHICKEN2); 1995 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1996 I915_WRITE(_TRANSA_CHICKEN2, val); 1997 } 1998 1999 /** 2000 * intel_enable_pipe - enable a pipe, asserting requirements 2001 * @crtc: crtc responsible for the pipe 2002 * 2003 * Enable @crtc's pipe, making sure that various hardware specific requirements 2004 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 2005 */ 2006 static void intel_enable_pipe(struct intel_crtc *crtc) 2007 { 2008 struct drm_device *dev = crtc->base.dev; 2009 struct drm_i915_private *dev_priv = dev->dev_private; 2010 enum i915_pipe pipe = crtc->pipe; 2011 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2012 pipe); 2013 enum i915_pipe pch_transcoder; 2014 int reg; 2015 u32 val; 2016 2017 assert_planes_disabled(dev_priv, pipe); 2018 assert_cursor_disabled(dev_priv, pipe); 2019 assert_sprites_disabled(dev_priv, pipe); 2020 2021 if (HAS_PCH_LPT(dev_priv->dev)) 2022 pch_transcoder = TRANSCODER_A; 2023 else 2024 pch_transcoder = pipe; 2025 2026 /* 2027 * A pipe without a PLL won't actually be able to drive bits from 2028 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 2029 * need the check. 
2030 */ 2031 if (!HAS_PCH_SPLIT(dev_priv->dev)) 2032 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 2033 assert_dsi_pll_enabled(dev_priv); 2034 else 2035 assert_pll_enabled(dev_priv, pipe); 2036 else { 2037 if (crtc->config->has_pch_encoder) { 2038 /* if driving the PCH, we need FDI enabled */ 2039 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 2040 assert_fdi_tx_pll_enabled(dev_priv, 2041 (enum i915_pipe) cpu_transcoder); 2042 } 2043 /* FIXME: assert CPU port conditions for SNB+ */ 2044 } 2045 2046 reg = PIPECONF(cpu_transcoder); 2047 val = I915_READ(reg); 2048 if (val & PIPECONF_ENABLE) { 2049 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 2050 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))); 2051 return; 2052 } 2053 2054 I915_WRITE(reg, val | PIPECONF_ENABLE); 2055 POSTING_READ(reg); 2056 } 2057 2058 /** 2059 * intel_disable_pipe - disable a pipe, asserting requirements 2060 * @crtc: crtc whose pipes is to be disabled 2061 * 2062 * Disable the pipe of @crtc, making sure that various hardware 2063 * specific requirements are met, if applicable, e.g. plane 2064 * disabled, panel fitter off, etc. 2065 * 2066 * Will wait until the pipe has shut down before returning. 2067 */ 2068 static void intel_disable_pipe(struct intel_crtc *crtc) 2069 { 2070 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2071 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2072 enum i915_pipe pipe = crtc->pipe; 2073 int reg; 2074 u32 val; 2075 2076 /* 2077 * Make sure planes won't keep trying to pump pixels to us, 2078 * or we might hang the display. 2079 */ 2080 assert_planes_disabled(dev_priv, pipe); 2081 assert_cursor_disabled(dev_priv, pipe); 2082 assert_sprites_disabled(dev_priv, pipe); 2083 2084 reg = PIPECONF(cpu_transcoder); 2085 val = I915_READ(reg); 2086 if ((val & PIPECONF_ENABLE) == 0) 2087 return; 2088 2089 /* 2090 * Double wide has implications for planes 2091 * so best keep it disabled when not needed. 
2092 */ 2093 if (crtc->config->double_wide) 2094 val &= ~PIPECONF_DOUBLE_WIDE; 2095 2096 /* Don't disable pipe or pipe PLLs if needed */ 2097 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2098 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2099 val &= ~PIPECONF_ENABLE; 2100 2101 I915_WRITE(reg, val); 2102 if ((val & PIPECONF_ENABLE) == 0) 2103 intel_wait_for_pipe_off(crtc); 2104 } 2105 2106 /* 2107 * Plane regs are double buffered, going from enabled->disabled needs a 2108 * trigger in order to latch. The display address reg provides this. 2109 */ 2110 void intel_flush_primary_plane(struct drm_i915_private *dev_priv, 2111 enum plane plane) 2112 { 2113 struct drm_device *dev = dev_priv->dev; 2114 u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane); 2115 2116 I915_WRITE(reg, I915_READ(reg)); 2117 POSTING_READ(reg); 2118 } 2119 2120 /** 2121 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe 2122 * @plane: plane to be enabled 2123 * @crtc: crtc for the plane 2124 * 2125 * Enable @plane on @crtc, making sure that the pipe is running first. 
 */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	/* Idempotent: nothing to do if already enabled. */
	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @plane: plane to be disabled
 * @crtc: crtc for the plane
 *
 * Disable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
					   struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (WARN_ON(!intel_crtc->active))
		return;

	/* Idempotent: nothing to do if already disabled. */
	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	/* update_primary_plane() writes the disable when primary_enabled
	 * is false; the same hook handles both enable and disable. */
	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);
}

/* True when the VT-d scanout workaround (extra alignment/padding) is
 * required: gen6+ with the IOMMU actively mapping graphics. */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/* Round @height up to a full tile row: 16 lines on gen2, 8 on later
 * gens for tiled surfaces, no alignment for linear. */
int
intel_fb_align_height(struct drm_device *dev, int height, unsigned int tiling)
{
	int tile_height;

	tile_height = tiling ? (IS_GEN2(dev) ? 16 : 8) : 1;
	return ALIGN(height, tile_height);
}

/*
 * Pin @fb's backing object into the GGTT for scanout and install a fence
 * register (needed for tiled scanout pre-965, installed unconditionally
 * for simplicity).  Returns 0 on success or a negative error code; on
 * success the caller owns the pin + fence and must release both with
 * intel_unpin_fb_obj().  Caller must hold struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   struct intel_engine_cs *pipelined)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Per-gen GGTT alignment requirements for scanout surfaces. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_TILING_X:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_TILING_Y:
		WARN(1, "Y tiled bo slipped through, driver bug!\n");
		return -EINVAL;
	default:
		BUG();
	}

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	/* Non-interruptible: a scanout pin must not be aborted by signals. */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
	/* Restore interruptibility and drop the rpm ref on all error paths. */
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/* Counterpart of intel_pin_and_fence_fb_obj(): drop the fence and the
 * display-plane pin.  Caller must hold struct_mutex. */
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
 */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* X-tiles are 512 bytes wide by 8 rows high (4096 bytes);
		 * reduce x/y to offsets within the base tile. */
		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		/* Linear: return the page-aligned byte offset and fold the
		 * remainder back into x (y becomes 0). */
		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}

/* Translate a gen2-gen4 DSPCNTR pixel-format field to a DRM fourcc.
 * Unknown values deliberately fall into the XRGB8888 case (default:
 * placed mid-switch). */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/* Translate a SKL PLANE_CTL format (+ channel order / alpha bits) to a
 * DRM fourcc.  Unknown formats fall into the 8888 case on purpose. */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/*
 * Wrap the BIOS-programmed framebuffer (preallocated stolen memory at
 * plane_config->base) in a GEM object + intel_framebuffer so the driver
 * can take over the firmware's scanout.  Returns true on success.
 */
static bool
intel_alloc_plane_obj(struct intel_crtc *crtc,
		      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	/* The fb may not start/end on page boundaries; cover the whole
	 * range [round_down(base), round_up(base + size)). */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = crtc->base.primary->fb->pitches[0];

	/* Geometry comes from the placeholder fb filled in by the
	 * plane-config readout. */
	mode_cmd.pixel_format = crtc->base.primary->fb->pixel_format;
	mode_cmd.width = crtc->base.primary->fb->width;
	mode_cmd.height = crtc->base.primary->fb->height;
	mode_cmd.pitches[0] = crtc->base.primary->fb->pitches[0];

	mutex_lock(&dev->struct_mutex);

	if (intel_framebuffer_init(dev, to_intel_framebuffer(crtc->base.primary->fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe);
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb != plane->state->fb)
		drm_atomic_set_fb_for_plane(plane->state, plane->fb);
}

/*
 * Take over the firmware framebuffer for @intel_crtc: try to wrap it in
 * a new fb via intel_alloc_plane_obj(); on failure, fall back to sharing
 * an already-active crtc's fb that scans out the same GGTT base.
 */
static void
intel_find_plane_obj(struct intel_crtc *intel_crtc,
		     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;

	if (!intel_crtc->base.primary->fb)
		return;

	if (intel_alloc_plane_obj(intel_crtc, plane_config)) {
		struct drm_plane *primary = intel_crtc->base.primary;

		primary->state->crtc = &intel_crtc->base;
		primary->crtc = &intel_crtc->base;
		update_state_fb(primary);

		return;
	}

	/* The placeholder fb was never registered; kfree (not unreference)
	 * is the correct disposal here. */
	kfree(intel_crtc->base.primary->fb);
	intel_crtc->base.primary->fb = NULL;

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			struct drm_plane *primary = intel_crtc->base.primary;

			if (obj->tiling_mode != I915_TILING_NONE)
				dev_priv->preserve_bios_swizzle = true;

			drm_framebuffer_reference(c->primary->fb);
			primary->fb = c->primary->fb;
			primary->state->crtc = &intel_crtc->base;
			primary->crtc = &intel_crtc->base;
			obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
			break;
		}
	}

	update_state_fb(intel_crtc->base.primary);
}

/* Program the gen2-gen4 primary plane registers for @fb at (@x, @y);
 * writes the plane disable when intel_crtc->primary_enabled is false. */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a page-aligned surface address plus a
		 * tile offset; split the linear offset accordingly. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches everything at the next vblank. */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/* Program the ILK-BDW primary plane registers for @fb at (@x, @y);
 * writes the plane disable when intel_crtc->primary_enabled is false. */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate in hardware without the offset fixup. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	I915_WRITE(reg, dspcntr);

	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
		      i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
		      fb->pitches[0]);
	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Program the SKL+ universal plane 0 registers for @fb at (@x, @y);
 * writes the plane disable when intel_crtc->primary_enabled is false. */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
		break;
	case DRM_FORMAT_XRGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ARGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XBGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ABGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XRGB2101010:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	default:
		BUG();
	}

	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		stride = fb->pitches[0] >> 6;
		break;
	case I915_TILING_X:
		plane_ctl |= PLANE_CTL_TILED_X;
		stride = fb->pitches[0] >> 9;
		break;
	default:
		BUG();
	}

	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
		plane_ctl |= PLANE_CTL_ROTATE_180;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);

	DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
		      i915_gem_obj_ggtt_offset(obj),
		      x, y, fb->width, fb->height,
		      fb->pitches[0]);

	I915_WRITE(PLANE_POS(pipe, 0), 0);
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
	I915_WRITE(PLANE_SIZE(pipe, 0),
		   (intel_crtc->config->pipe_src_h - 1) << 16 |
		   (intel_crtc->config->pipe_src_w - 1));
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
	/* PLANE_SURF write latches the whole update at the next vblank. */
	I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));

	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

/* Complete every pending page flip on every crtc, so waiters see their
 * flip events even though the flip itself was lost (e.g. to a GPU reset). */
static void intel_complete_page_flips(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}
}

/* Re-write the primary plane registers of every active crtc from the
 * current fb, used to restore scanout after a GPU reset. */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

/* Quiesce the display before a GPU reset.  On gen3/4 this takes all
 * modeset locks and disables the crtcs; intel_finish_reset() is the
 * counterpart that drops the locks again. */
void intel_prepare_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	/* Held across the reset; released in intel_finish_reset(). */
	drm_modeset_lock_all(dev);

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active)
			dev_priv->display.crtc_disable(&crtc->base);
	}
}

/* Bring the display back after a GPU reset; pairs with
 * intel_prepare_reset() and drops the modeset locks it took. */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_modeset_setup_hw_state(dev, true);

	intel_hpd_init(dev_priv);

	drm_modeset_unlock_all(dev);
}

/* Wait for the GPU to finish rendering to @old_fb's object before it is
 * unpinned.  Returns the (only-on-GPU-hang) error from finish_gpu. */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}

/* True when @crtc still has a queued page flip that a reset has not
 * already invalidated. */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	/* A reset (in progress or completed since the flip was queued)
	 * nukes ring flips, so report no pending flip. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}

/* Fastboot helper: re-program pipesrc (and drop an unneeded panel
 * fitter) so a flip onto differently-sized state scans out correctly. */
static void intel_update_pipe_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode;

	if (!i915.fastboot)
		return;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */

	adjusted_mode = &crtc->config->base.adjusted_mode;

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
		   (adjusted_mode->crtc_vdisplay - 1));
	/* Turn the panel fitter off for LVDS/eDP when the config no longer
	 * uses the PCH pfit. */
	if (!crtc->config->pch_pfit.enabled &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		I915_WRITE(PF_CTL(crtc->pipe), 0);
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
	}
	crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
	crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}

/* Switch the FDI link of @crtc's pipe from a training pattern to the
 * normal (pixel-data) pattern after training has completed. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* True when @crtc is up and actually driving a PCH encoder (and thus
 * needs its FDI lanes). */
static bool pipe_has_enabled_pch(struct intel_crtc *crtc)
{
	return crtc->base.enabled && crtc->active &&
		crtc->config->has_pch_encoder;
}

/* IVB global modeset hook: drop the FDI B/C lane bifurcation when no
 * pipe needs FDI C, so FDI B can use all lanes. */
static void ivb_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
	struct intel_crtc *pipe_C_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
	uint32_t temp;

	/*
	 * When everything is off disable fdi C so that we could enable fdi B
	 * with all lanes. Note that we don't care about enabled pipes without
	 * an enabled pch encoder.
	 */
	if (!pipe_has_enabled_pch(pipe_B_crtc) &&
	    !pipe_has_enabled_pch(pipe_C_crtc)) {
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
		WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

		temp = I915_READ(SOUTH_CHICKEN1);
		temp &= ~FDI_BC_BIFURCATION_SELECT;
		DRM_DEBUG_KMS("disabling fdi C rx\n");
		I915_WRITE(SOUTH_CHICKEN1, temp);
	}
}

/* The FDI link training functions for ILK/Ibexpeak.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll (up to 5 tries) for bit lock to confirm pattern 1. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write-1-to-clear the sticky lock bit. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll (up to 5 tries) for symbol lock to confirm pattern 2. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* SNB-B voltage-swing / pre-emphasis levels tried in order during
 * gen6 FDI training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

I915_WRITE(FDI_RX_MISC(pipe), 3228 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3229 3230 reg = FDI_RX_CTL(pipe); 3231 temp = I915_READ(reg); 3232 if (HAS_PCH_CPT(dev)) { 3233 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3234 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3235 } else { 3236 temp &= ~FDI_LINK_TRAIN_NONE; 3237 temp |= FDI_LINK_TRAIN_PATTERN_1; 3238 } 3239 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3240 3241 POSTING_READ(reg); 3242 udelay(150); 3243 3244 for (i = 0; i < 4; i++) { 3245 reg = FDI_TX_CTL(pipe); 3246 temp = I915_READ(reg); 3247 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3248 temp |= snb_b_fdi_train_param[i]; 3249 I915_WRITE(reg, temp); 3250 3251 POSTING_READ(reg); 3252 udelay(500); 3253 3254 for (retry = 0; retry < 5; retry++) { 3255 reg = FDI_RX_IIR(pipe); 3256 temp = I915_READ(reg); 3257 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3258 if (temp & FDI_RX_BIT_LOCK) { 3259 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3260 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3261 break; 3262 } 3263 udelay(50); 3264 } 3265 if (retry < 5) 3266 break; 3267 } 3268 if (i == 4) 3269 DRM_ERROR("FDI train 1 fail!\n"); 3270 3271 /* Train 2 */ 3272 reg = FDI_TX_CTL(pipe); 3273 temp = I915_READ(reg); 3274 temp &= ~FDI_LINK_TRAIN_NONE; 3275 temp |= FDI_LINK_TRAIN_PATTERN_2; 3276 if (IS_GEN6(dev)) { 3277 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3278 /* SNB-B */ 3279 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3280 } 3281 I915_WRITE(reg, temp); 3282 3283 reg = FDI_RX_CTL(pipe); 3284 temp = I915_READ(reg); 3285 if (HAS_PCH_CPT(dev)) { 3286 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3287 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3288 } else { 3289 temp &= ~FDI_LINK_TRAIN_NONE; 3290 temp |= FDI_LINK_TRAIN_PATTERN_2; 3291 } 3292 I915_WRITE(reg, temp); 3293 3294 POSTING_READ(reg); 3295 udelay(150); 3296 3297 for (i = 0; i < 4; i++) { 3298 reg = FDI_TX_CTL(pipe); 3299 temp = I915_READ(reg); 3300 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3301 temp |= snb_b_fdi_train_param[i]; 3302 I915_WRITE(reg, temp); 3303 3304 
/* Manual link training for Ivy Bridge A0 parts */
/*
 * Tries each vswing/pre-emphasis level from snb_b_fdi_train_param twice
 * (j/2 indexes the table), fully disabling and re-enabling TX/RX between
 * attempts. Jumps to train_done as soon as symbol lock is achieved.
 * The very short udelay()s carry "should be" comments from the spec;
 * keep them as-is.
 */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read in case the bit latched between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for this CRTC's
 * pipe, then leave both sides parked in training pattern 1 so the next
 * link train starts from a known state. The BPC field in FDI RX is kept
 * in sync with PIPECONF on every RX write.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses its own pattern-select field */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
/*
 * Finish a pending page flip on @intel_crtc: clear ->unpin_work, deliver
 * the userspace vblank event (if one was requested), drop the vblank
 * reference taken for the flip, wake waiters on pending_flip_queue and
 * queue the deferred unpin work.
 * NOTE(review): callers appear to hold dev->event_lock (see
 * intel_crtc_wait_for_pending_flips below) — confirm for any new caller.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
/* Program iCLKIP clock to the desired frequency */
/*
 * LPT only: programs the iCLKIP modulator through the SBI sideband
 * interface so the PCH transcoder runs at adjusted_mode.crtc_clock (kHz).
 * Sequence: ungate pixclk, disable SSCCTL, compute divisor/phase-inc from
 * the 172.8 MHz virtual root clock, program SSCDIVINTPHASE6 and
 * SSCAUXDIV6, re-enable the modulator, wait 24us, regate pixclk.
 * Serialised by dpio_lock.
 */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock in in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}
intel_crtc *crtc, 3727 enum i915_pipe pch_transcoder) 3728 { 3729 struct drm_device *dev = crtc->base.dev; 3730 struct drm_i915_private *dev_priv = dev->dev_private; 3731 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 3732 3733 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 3734 I915_READ(HTOTAL(cpu_transcoder))); 3735 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 3736 I915_READ(HBLANK(cpu_transcoder))); 3737 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 3738 I915_READ(HSYNC(cpu_transcoder))); 3739 3740 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 3741 I915_READ(VTOTAL(cpu_transcoder))); 3742 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 3743 I915_READ(VBLANK(cpu_transcoder))); 3744 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 3745 I915_READ(VSYNC(cpu_transcoder))); 3746 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 3747 I915_READ(VSYNCSHIFT(cpu_transcoder))); 3748 } 3749 3750 static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) 3751 { 3752 struct drm_i915_private *dev_priv = dev->dev_private; 3753 uint32_t temp; 3754 3755 temp = I915_READ(SOUTH_CHICKEN1); 3756 if (temp & FDI_BC_BIFURCATION_SELECT) 3757 return; 3758 3759 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 3760 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 3761 3762 temp |= FDI_BC_BIFURCATION_SELECT; 3763 DRM_DEBUG_KMS("enabling fdi C rx\n"); 3764 I915_WRITE(SOUTH_CHICKEN1, temp); 3765 POSTING_READ(SOUTH_CHICKEN1); 3766 } 3767 3768 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 3769 { 3770 struct drm_device *dev = intel_crtc->base.dev; 3771 struct drm_i915_private *dev_priv = dev->dev_private; 3772 3773 switch (intel_crtc->pipe) { 3774 case PIPE_A: 3775 break; 3776 case PIPE_B: 3777 if (intel_crtc->config->fdi_lanes > 2) 3778 WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); 3779 else 3780 cpt_enable_fdi_bc_bifurcation(dev); 3781 3782 break; 3783 case PIPE_C: 3784 cpt_enable_fdi_bc_bifurcation(dev); 3785 3786 break; 
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		/* BPC read from PIPECONF, re-shifted into TRANS_DP_CTL 11:9 */
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
*/ 3900 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 3901 3902 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 3903 } 3904 3905 void intel_put_shared_dpll(struct intel_crtc *crtc) 3906 { 3907 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3908 3909 if (pll == NULL) 3910 return; 3911 3912 if (!(pll->config.crtc_mask & (1 << crtc->pipe))) { 3913 WARN(1, "bad %s crtc mask\n", pll->name); 3914 return; 3915 } 3916 3917 pll->config.crtc_mask &= ~(1 << crtc->pipe); 3918 if (pll->config.crtc_mask == 0) { 3919 WARN_ON(pll->on); 3920 WARN_ON(pll->active); 3921 } 3922 3923 crtc->config->shared_dpll = DPLL_ID_PRIVATE; 3924 } 3925 3926 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, 3927 struct intel_crtc_state *crtc_state) 3928 { 3929 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3930 struct intel_shared_dpll *pll; 3931 enum intel_dpll_id i; 3932 3933 if (HAS_PCH_IBX(dev_priv->dev)) { 3934 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3935 i = (enum intel_dpll_id) crtc->pipe; 3936 pll = &dev_priv->shared_dplls[i]; 3937 3938 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3939 crtc->base.base.id, pll->name); 3940 3941 WARN_ON(pll->new_config->crtc_mask); 3942 3943 goto found; 3944 } 3945 3946 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3947 pll = &dev_priv->shared_dplls[i]; 3948 3949 /* Only want to check enabled timings first */ 3950 if (pll->new_config->crtc_mask == 0) 3951 continue; 3952 3953 if (memcmp(&crtc_state->dpll_hw_state, 3954 &pll->new_config->hw_state, 3955 sizeof(pll->new_config->hw_state)) == 0) { 3956 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", 3957 crtc->base.base.id, pll->name, 3958 pll->new_config->crtc_mask, 3959 pll->active); 3960 goto found; 3961 } 3962 } 3963 3964 /* Ok no matching timings, maybe there's a free one? 
*/ 3965 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3966 pll = &dev_priv->shared_dplls[i]; 3967 if (pll->new_config->crtc_mask == 0) { 3968 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 3969 crtc->base.base.id, pll->name); 3970 goto found; 3971 } 3972 } 3973 3974 return NULL; 3975 3976 found: 3977 if (pll->new_config->crtc_mask == 0) 3978 pll->new_config->hw_state = crtc_state->dpll_hw_state; 3979 3980 crtc_state->shared_dpll = i; 3981 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 3982 pipe_name(crtc->pipe)); 3983 3984 pll->new_config->crtc_mask |= 1 << crtc->pipe; 3985 3986 return pll; 3987 } 3988 3989 /** 3990 * intel_shared_dpll_start_config - start a new PLL staged config 3991 * @dev_priv: DRM device 3992 * @clear_pipes: mask of pipes that will have their PLLs freed 3993 * 3994 * Starts a new PLL staged config, copying the current config but 3995 * releasing the references of pipes specified in clear_pipes. 3996 */ 3997 static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv, 3998 unsigned clear_pipes) 3999 { 4000 struct intel_shared_dpll *pll; 4001 enum intel_dpll_id i; 4002 4003 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4004 pll = &dev_priv->shared_dplls[i]; 4005 4006 pll->new_config = kmemdup(&pll->config, sizeof pll->config, 4007 GFP_KERNEL); 4008 if (!pll->new_config) 4009 goto cleanup; 4010 4011 pll->new_config->crtc_mask &= ~clear_pipes; 4012 } 4013 4014 return 0; 4015 4016 cleanup: 4017 while (--i >= 0) { 4018 pll = &dev_priv->shared_dplls[i]; 4019 kfree(pll->new_config); 4020 pll->new_config = NULL; 4021 } 4022 4023 return -ENOMEM; 4024 } 4025 4026 static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv) 4027 { 4028 struct intel_shared_dpll *pll; 4029 enum intel_dpll_id i; 4030 4031 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4032 pll = &dev_priv->shared_dplls[i]; 4033 4034 WARN_ON(pll->new_config == &pll->config); 4035 4036 pll->config = *pll->new_config; 4037 kfree(pll->new_config); 4038 
/*
 * Sanity-check that the pipe is actually running after a CPT modeset by
 * watching PIPEDSL (the scanline counter) advance. The wait is retried
 * once before declaring the pipe stuck; wait_for() polls for up to 5 ms.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* one retry: the counter may legitimately stall briefly */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4094 */ 4095 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 4096 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4097 PF_PIPE_SEL_IVB(pipe)); 4098 else 4099 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4100 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4101 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4102 } 4103 } 4104 4105 static void intel_enable_sprite_planes(struct drm_crtc *crtc) 4106 { 4107 struct drm_device *dev = crtc->dev; 4108 enum i915_pipe pipe = to_intel_crtc(crtc)->pipe; 4109 struct drm_plane *plane; 4110 struct intel_plane *intel_plane; 4111 4112 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 4113 intel_plane = to_intel_plane(plane); 4114 if (intel_plane->pipe == pipe) 4115 intel_plane_restore(&intel_plane->base); 4116 } 4117 } 4118 4119 static void intel_disable_sprite_planes(struct drm_crtc *crtc) 4120 { 4121 struct drm_device *dev = crtc->dev; 4122 enum i915_pipe pipe = to_intel_crtc(crtc)->pipe; 4123 struct drm_plane *plane; 4124 struct intel_plane *intel_plane; 4125 4126 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 4127 intel_plane = to_intel_plane(plane); 4128 if (intel_plane->pipe == pipe) 4129 plane->funcs->disable_plane(plane); 4130 } 4131 } 4132 4133 void hsw_enable_ips(struct intel_crtc *crtc) 4134 { 4135 struct drm_device *dev = crtc->base.dev; 4136 struct drm_i915_private *dev_priv = dev->dev_private; 4137 4138 if (!crtc->config->ips_enabled) 4139 return; 4140 4141 /* We can only enable IPS after we enable a plane and wait for a vblank */ 4142 intel_wait_for_vblank(dev, crtc->pipe); 4143 4144 assert_plane_enabled(dev_priv, crtc->plane); 4145 if (IS_BROADWELL(dev)) { 4146 mutex_lock(&dev_priv->rps.hw_lock); 4147 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4148 mutex_unlock(&dev_priv->rps.hw_lock); 4149 /* Quoting Art Runyan: "its not safe to expect any particular 4150 * value in IPS_CTL bit 31 after enabling IPS through the 
/*
 * Turn off Intermediate Pixel Storage for @crtc. On Broadwell this goes
 * through the pcode mailbox (under rps.hw_lock) and we poll IPS_CTL for
 * completion; on Haswell a direct register write suffices. A no-op when
 * the crtc config never enabled IPS. Ends with a vblank wait because the
 * plane must not be disabled until IPS is fully off.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
*/ 4203 if (!crtc->enabled || !intel_crtc->active) 4204 return; 4205 4206 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4207 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4208 assert_dsi_pll_enabled(dev_priv); 4209 else 4210 assert_pll_enabled(dev_priv, pipe); 4211 } 4212 4213 /* use legacy palette for Ironlake */ 4214 if (!HAS_GMCH_DISPLAY(dev)) 4215 palreg = LGC_PALETTE(pipe); 4216 4217 /* Workaround : Do not read or write the pipe palette/gamma data while 4218 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4219 */ 4220 if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && 4221 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 4222 GAMMA_MODE_MODE_SPLIT)) { 4223 hsw_disable_ips(intel_crtc); 4224 reenable_ips = true; 4225 } 4226 4227 for (i = 0; i < 256; i++) { 4228 I915_WRITE(palreg + 4 * i, 4229 (intel_crtc->lut_r[i] << 16) | 4230 (intel_crtc->lut_g[i] << 8) | 4231 intel_crtc->lut_b[i]); 4232 } 4233 4234 if (reenable_ips) 4235 hsw_enable_ips(intel_crtc); 4236 } 4237 4238 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 4239 { 4240 if (!enable && intel_crtc->overlay) { 4241 struct drm_device *dev = intel_crtc->base.dev; 4242 struct drm_i915_private *dev_priv = dev->dev_private; 4243 4244 mutex_lock(&dev->struct_mutex); 4245 dev_priv->mm.interruptible = false; 4246 (void) intel_overlay_switch_off(intel_crtc->overlay); 4247 dev_priv->mm.interruptible = true; 4248 mutex_unlock(&dev->struct_mutex); 4249 } 4250 4251 /* Let userspace switch the overlay on again. In most cases userspace 4252 * has to recompute where to put it anyway. 
4253 */ 4254 } 4255 4256 static void intel_crtc_enable_planes(struct drm_crtc *crtc) 4257 { 4258 struct drm_device *dev = crtc->dev; 4259 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4260 int pipe = intel_crtc->pipe; 4261 4262 intel_enable_primary_hw_plane(crtc->primary, crtc); 4263 intel_enable_sprite_planes(crtc); 4264 intel_crtc_update_cursor(crtc, true); 4265 intel_crtc_dpms_overlay(intel_crtc, true); 4266 4267 hsw_enable_ips(intel_crtc); 4268 4269 mutex_lock(&dev->struct_mutex); 4270 intel_fbc_update(dev); 4271 mutex_unlock(&dev->struct_mutex); 4272 4273 /* 4274 * FIXME: Once we grow proper nuclear flip support out of this we need 4275 * to compute the mask of flip planes precisely. For the time being 4276 * consider this a flip from a NULL plane. 4277 */ 4278 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4279 } 4280 4281 static void intel_crtc_disable_planes(struct drm_crtc *crtc) 4282 { 4283 struct drm_device *dev = crtc->dev; 4284 struct drm_i915_private *dev_priv = dev->dev_private; 4285 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4286 int pipe = intel_crtc->pipe; 4287 int plane = intel_crtc->plane; 4288 4289 intel_crtc_wait_for_pending_flips(crtc); 4290 4291 if (dev_priv->fbc.plane == plane) 4292 intel_fbc_disable(dev); 4293 4294 hsw_disable_ips(intel_crtc); 4295 4296 intel_crtc_dpms_overlay(intel_crtc, false); 4297 intel_crtc_update_cursor(crtc, false); 4298 intel_disable_sprite_planes(crtc); 4299 intel_disable_primary_hw_plane(crtc->primary, crtc); 4300 4301 /* 4302 * FIXME: Once we grow proper nuclear flip support out of this we need 4303 * to compute the mask of flip planes precisely. For the time being 4304 * consider this a flip to a NULL plane. 
4305 */ 4306 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4307 } 4308 4309 static void ironlake_crtc_enable(struct drm_crtc *crtc) 4310 { 4311 struct drm_device *dev = crtc->dev; 4312 struct drm_i915_private *dev_priv = dev->dev_private; 4313 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4314 struct intel_encoder *encoder; 4315 int pipe = intel_crtc->pipe; 4316 4317 WARN_ON(!crtc->enabled); 4318 4319 if (intel_crtc->active) 4320 return; 4321 4322 if (intel_crtc->config->has_pch_encoder) 4323 intel_prepare_shared_dpll(intel_crtc); 4324 4325 if (intel_crtc->config->has_dp_encoder) 4326 intel_dp_set_m_n(intel_crtc); 4327 4328 intel_set_pipe_timings(intel_crtc); 4329 4330 if (intel_crtc->config->has_pch_encoder) { 4331 intel_cpu_transcoder_set_m_n(intel_crtc, 4332 &intel_crtc->config->fdi_m_n, NULL); 4333 } 4334 4335 ironlake_set_pipeconf(crtc); 4336 4337 intel_crtc->active = true; 4338 4339 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4340 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4341 4342 for_each_encoder_on_crtc(dev, crtc, encoder) 4343 if (encoder->pre_enable) 4344 encoder->pre_enable(encoder); 4345 4346 if (intel_crtc->config->has_pch_encoder) { 4347 /* Note: FDI PLL enabling _must_ be done before we enable the 4348 * cpu pipes, hence this is separate from all the other fdi/pch 4349 * enabling. 
		 */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	intel_crtc_enable_planes(crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_crtc *crtc_it, *other_active_crtc = NULL;

	/* We want to get the other_active_crtc only if there's only 1 other
	 * active crtc. */
	for_each_intel_crtc(dev, crtc_it) {
		if (!crtc_it->active || crtc_it == crtc)
			continue;

		if (other_active_crtc)
			return;

		other_active_crtc = crtc_it;
	}
	if (!other_active_crtc)
		return;

	intel_wait_for_vblank(dev, other_active_crtc->pipe);
	intel_wait_for_vblank(dev, other_active_crtc->pipe);
}

/* Modeset enable sequence for a HSW/BDW/SKL (DDI) pipe. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
		dev_priv->display.fdi_link_train(crtc);
	}

	intel_ddi_enable_pipe_clock(intel_crtc);

	if (IS_SKYLAKE(dev))
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	haswell_mode_set_planes_workaround(intel_crtc);
	intel_crtc_enable_planes(crtc);
}

/* Turn off the SKL pipe scaler, if it was in use. */
static void skylake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PS_CTL(pipe), 0);
		I915_WRITE(PS_WIN_POS(pipe), 0);
		I915_WRITE(PS_WIN_SZ(pipe), 0);
	}
}

/* Turn off the ILK panel fitter, if it was in use. */
static void ironlake_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right.
	 */
	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

/* Modeset disable sequence for an ILK-IVB (PCH) pipe. */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);

		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		/* disable PCH DPLL */
		intel_disable_shared_dpll(intel_crtc);

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* Modeset disable sequence for a HSW+ (DDI) pipe. */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (!intel_crtc->active)
		return;

	intel_crtc_disable_planes(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (IS_SKYLAKE(dev))
		skylake_pfit_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

/* Drop the crtc's reference on its shared DPLL once it is fully off. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}


/* Program the GMCH panel fitter; the pipe must be disabled while doing so. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain that feeds it (4-lane domain). */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Iterate over every power domain bit set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

/* Return the power domain needed by @intel_encoder's output port. */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Compute the mask of power domains @crtc currently requires. */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config->pch_pfit.enabled ||
	    intel_crtc->config->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

/* Re-take/release power-domain references for all crtcs around a modeset. */
static void modeset_update_crtc_power_domains(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
	 */
	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		if (!crtc->base.enabled)
			continue;

		pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base);

		for_each_power_domain(domain, pipe_domains[crtc->pipe])
			intel_display_power_get(dev_priv, domain);
	}

	if (dev_priv->display.modeset_global_resources)
		dev_priv->display.modeset_global_resources(dev);

	for_each_intel_crtc(dev, crtc) {
		enum intel_display_power_domain domain;

		for_each_power_domain(domain, crtc->enabled_power_domains)
			intel_display_power_put(dev_priv, domain);

		crtc->enabled_power_domains = pipe_domains[crtc->pipe];
	}

	intel_display_set_init_power(dev_priv, false);
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->dpio_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->dpio_lock);

	return vco_freq[hpll_freq] * 1000;
}

/* Refresh the cached CDclk and reprogram the dependent GMBUS frequency. */
static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->vlv_cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new voltage/frequency point from the punit. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		mutex_lock(&dev_priv->dpio_lock);
		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
		mutex_unlock(&dev_priv->dpio_lock);
	}

	mutex_lock(&dev_priv->dpio_lock);
	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
	mutex_unlock(&dev_priv->dpio_lock);

	vlv_update_cdclk(dev);
}

/* CHV variant: the CDclk change goes entirely through the punit mailbox. */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq);

	switch (cdclk) {
	case 400000:
		cmd = 3;
		break;
	case 333333:
	case 320000:
		cmd = 2;
		break;
	case 266667:
		cmd = 1;
		break;
	case 200000:
		cmd = 0;
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	vlv_update_cdclk(dev);
}

/* Pick the lowest supported CDclk that can serve @max_pixclk. */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;

	/* FIXME: Punit isn't quite ready yet */
	if (IS_CHERRYVIEW(dev_priv->dev))
		return 400000;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz
	 * So we check to see whether we're above 90% of the lower bin and
	 * adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (max_pixclk > freq_320*9/10)
		return 400000;
	else if (max_pixclk > 266667*9/10)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* compute the max pixel clock for new configuration */
static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->new_enabled)
			max_pixclk = max(max_pixclk,
					 intel_crtc->new_config->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

/* Mark all active pipes for re-enable if the new config changes the CDclk. */
static void valleyview_modeset_global_pipes(struct drm_device *dev,
					    unsigned *prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);

	if (valleyview_calc_cdclk(dev_priv, max_pixclk) ==
	    dev_priv->vlv_cdclk_freq)
		return;

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_intel_crtc(dev, intel_crtc)
		if (intel_crtc->base.enabled)
			*prepare_pipes |= (1 << intel_crtc->pipe);
}

/* Reprogram the CDclk (with the pipes off) if the new config requires it. */
static void valleyview_modeset_global_resources(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev_priv);
	int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->vlv_cdclk_freq) {
		/*
		 * FIXME: We can end up here with all power domains off, yet
		 * with a CDCLK frequency other than the minimum. To account
		 * for this take the PIPE-A power domain, which covers the HW
		 * blocks needed for the following programming. This can be
		 * removed once it's guaranteed that we get here either with
		 * the minimum CDCLK set, or the required power domains
		 * enabled.
		 */
		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

		if (IS_CHERRYVIEW(dev))
			cherryview_set_cdclk(dev, req_cdclk);
		else
			valleyview_set_cdclk(dev, req_cdclk);

		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
	}
}

/* Modeset enable sequence for a VLV/CHV pipe. */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	/* DSI outputs drive their own PLL, so skip the display PLL setup. */
	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_prepare_pll(intel_crtc, intel_crtc->config);
		else
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
	}

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc, intel_crtc->config);
		else
			vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}

/* Write the precomputed FP0/FP1 dividers for the i9xx DPLL. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

/* Modeset enable sequence for a gen2-4 (GMCH) pipe. */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->enabled);

	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}

/* Turn off the GMCH panel fitter; the pipe must already be disabled. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/* Modeset disable sequence for a gen2-4 / VLV / CHV (GMCH) pipe. */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI drives its own PLL; everything else turns off the display PLL. */
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* Nothing extra to do on GMCH platforms once the crtc is fully off. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
5267 unsigned long domains; 5268 5269 if (enable) { 5270 if (!intel_crtc->active) { 5271 domains = get_crtc_power_domains(crtc); 5272 for_each_power_domain(domain, domains) 5273 intel_display_power_get(dev_priv, domain); 5274 intel_crtc->enabled_power_domains = domains; 5275 5276 dev_priv->display.crtc_enable(crtc); 5277 } 5278 } else { 5279 if (intel_crtc->active) { 5280 dev_priv->display.crtc_disable(crtc); 5281 5282 domains = intel_crtc->enabled_power_domains; 5283 for_each_power_domain(domain, domains) 5284 intel_display_power_put(dev_priv, domain); 5285 intel_crtc->enabled_power_domains = 0; 5286 } 5287 } 5288 } 5289 5290 /** 5291 * Sets the power management mode of the pipe and plane. 5292 */ 5293 void intel_crtc_update_dpms(struct drm_crtc *crtc) 5294 { 5295 struct drm_device *dev = crtc->dev; 5296 struct intel_encoder *intel_encoder; 5297 bool enable = false; 5298 5299 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 5300 enable |= intel_encoder->connectors_active; 5301 5302 intel_crtc_control(crtc, enable); 5303 } 5304 5305 static void intel_crtc_disable(struct drm_crtc *crtc) 5306 { 5307 struct drm_device *dev = crtc->dev; 5308 struct drm_connector *connector; 5309 struct drm_i915_private *dev_priv = dev->dev_private; 5310 5311 /* crtc should still be enabled when we disable it. */ 5312 WARN_ON(!crtc->enabled); 5313 5314 dev_priv->display.crtc_disable(crtc); 5315 dev_priv->display.off(crtc); 5316 5317 crtc->primary->funcs->disable_plane(crtc->primary); 5318 5319 /* Update computed state. 
	 */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

/* Free an intel_encoder after detaching it from the drm core. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency.
 */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		/* there is no real hw state for MST connectors */
		if (connector->mst_port)
			return;

		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
		     "wrong connector dpms state\n");
		/* NOTE(review): &encoder->base is computed here before the
		 * if (encoder) check below — only an address computation,
		 * but worth confirming encoder can never be NULL when
		 * get_hw_state() returned true. */
		I915_STATE_WARN(connector->base.encoder != &encoder->base,
		     "active connector not linked to encoder\n");

		if (encoder) {
			I915_STATE_WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
			if (I915_STATE_WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			I915_STATE_WARN(!crtc->enabled, "crtc not enabled\n");
			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}

/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states.
	 */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/*
 * Validate the requested FDI lane count against the platform limits and
 * against the lanes already claimed by other pipes (FDI link B/C sharing
 * on Ivybridge). Returns false if the configuration cannot work.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *pipe_B_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		/* Pipe B may only use >2 lanes while pipe C is off. */
		if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
		    pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		/* Pipe C can only run at all while link B uses <= 2 lanes. */
		if (!pipe_has_enabled_pch(pipe_B_crtc) ||
		    pipe_B_crtc->config->fdi_lanes <= 2) {
			if (pipe_config->fdi_lanes > 2) {
				DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
					      pipe_name(pipe), pipe_config->fdi_lanes);
				return false;
			}
		} else {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}

#define RETRY 1
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	/* If the lane count doesn't fit, retry with a lower bpp (down to
	 * 6 bpc) before giving up entirely. */
	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	/* Tell the caller the config changed so it can recompute state. */
	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}

/* IPS (intermediate pixel storage) is a hsw power feature; it can only be
 * used when the module parameter allows it, the crtc supports it and the
 * pipe runs at <= 8 bpc. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
				   hsw_crtc_supports_ips(crtc) &&
				   pipe_config->pipe_bpp <= 24;
}

/* Platform-independent part of crtc state computation: dotclock limits,
 * pixel doubling, pipe size restrictions, bpp clamping, IPS and FDI. */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		/* Still over the (possibly doubled) limit? Mode won't work. */
		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	/* Clamp bpp to what the pipe can actually produce. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds.
		 */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Read the current cdclk from the CCK divider (vlv); chv uses a fixed
 * placeholder until the Punit interface is ready. Returns kHz. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	int divider;

	/* FIXME: Punit isn't quite ready yet */
	if (IS_CHERRYVIEW(dev))
		return 400000;

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}

/* Fixed display core clocks (kHz) for platforms without a readable divider. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Decode the display core clock from the GCFGC PCI config register. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - treat unknown values as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}

static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133000;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333000;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266000;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from hardware, so the switch
	 * always sees 0 — presumably HPLLCC should be read from the host
	 * bridge here; TODO confirm against the i855 clocking docs. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133000;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133000;
}

/* Halve num/den together until both fit in the M/N register field. */
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Express the ratio m/n as register-sized M/N values, with N rounded up
 * to a power of two (capped at DATA_LINK_N_MAX) and M scaled to match. */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/* Fill in the gmch (data) and link M/N values for a DP/FDI link. */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}

/* Module parameter overrides the VBT, which can be vetoed by a quirk. */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/* Pick the DPLL reference clock (kHz) for the given crtc. */
static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	if (IS_VALLEYVIEW(dev)) {
		refclk = 100000;
	} else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
		   intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

/* Pack the FP divisor register value (pnv uses N as a power of two). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

/* Compute FP0/FP1; FP1 gets the reduced clock only when LVDS downclocking
 * is actually possible and powersave is enabled. */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
	    reduced_clock && i915.powersave) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the plain assignment below discards the masked
	 * read-modify value computed above — looks suspicious but matches
	 * the documented DPIO sequence; confirm before "fixing". */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the PCH transcoder M/N registers (ilk-style FDI links). */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/* Program the CPU transcoder M/N registers; M2/N2 (for DRRS) only exist
 * on gen5..7. Pre-gen5 uses the per-pipe G4X registers instead. */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}

/* Write the vlv DPIO PLL configuration (dividers, LPF coefficients,
 * clock source selection) for the computed state; the PLL itself is
 * enabled later. Follows the eDP/HDMI DPIO vbios notes sequence. */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->dpio_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs
	 * digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->dpio_lock);
}

/* Compute the chv DPLL/DPLL_MD register values for the state checker;
 * the CRI clock bit must stay set on all pipes but A. */
static void chv_update_pll(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
		DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

/* Program the chv PHY PLL dividers and loop filter through DPIO for the
 * computed state; the PLL itself is enabled later. */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, intcoeff;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	int refclk;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->dpio_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
			5 << DPIO_CHV_S1_DIV_SHIFT |
			bestp1 << DPIO_CHV_P1_DIV_SHIFT |
			bestp2 << DPIO_CHV_P2_DIV_SHIFT |
			1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
			DPIO_CHV_M1_DIV_BY_2 |
			1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port),
		       DPIO_CHV_FRAC_DIV_EN |
		       (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));

	/* Loop filter */
	refclk = i9xx_get_refclk(crtc, 0);
	loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
		2 << DPIO_CHV_GAIN_CTRL_SHIFT;
	if (refclk == 100000)
		intcoeff = 11;
	else if (refclk == 38400)
		intcoeff = 10;
	else
		intcoeff = 9;
	loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->dpio_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	struct intel_crtc_state pipe_config = {
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_update_pll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need
 * the PLL enabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/* Compute the DPLL (and gen4+ DPLL_MD) register values for gen3+ gmch
 * platforms from the chosen clock dividers and output types. */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    struct intel_crtc_state *crtc_state,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread-spectrum LVDS or the
	 * default reference. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

/* Gen2 variant of the above: different P1/P2 encoding, optional DVO 2x
 * clock, no DPLL_MD register. */
static void i8xx_update_pll(struct intel_crtc *crtc,
			    struct intel_crtc_state *crtc_state,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

/* Program the pipe/transcoder timing registers (H/V total, blank, sync)
 * and PIPESRC from the adjusted mode, with the interlace and SDVO
 * vsyncshift adjustments the hardware requires. */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C.
This is 6320 * documented on the DDI_FUNC_CTL register description, EDP Input Select 6321 * bits. */ 6322 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 6323 (pipe == PIPE_B || pipe == PIPE_C)) 6324 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 6325 6326 /* pipesrc controls the size that is scaled from, which should 6327 * always be the user's requested size. 6328 */ 6329 I915_WRITE(PIPESRC(pipe), 6330 ((intel_crtc->config->pipe_src_w - 1) << 16) | 6331 (intel_crtc->config->pipe_src_h - 1)); 6332 } 6333 6334 static void intel_get_pipe_timings(struct intel_crtc *crtc, 6335 struct intel_crtc_state *pipe_config) 6336 { 6337 struct drm_device *dev = crtc->base.dev; 6338 struct drm_i915_private *dev_priv = dev->dev_private; 6339 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6340 uint32_t tmp; 6341 6342 tmp = I915_READ(HTOTAL(cpu_transcoder)); 6343 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 6344 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 6345 tmp = I915_READ(HBLANK(cpu_transcoder)); 6346 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 6347 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 6348 tmp = I915_READ(HSYNC(cpu_transcoder)); 6349 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 6350 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 6351 6352 tmp = I915_READ(VTOTAL(cpu_transcoder)); 6353 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 6354 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 6355 tmp = I915_READ(VBLANK(cpu_transcoder)); 6356 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 6357 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; 6358 tmp = I915_READ(VSYNC(cpu_transcoder)); 6359 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 6360 
pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 6361 6362 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 6363 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 6364 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 6365 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 6366 } 6367 6368 tmp = I915_READ(PIPESRC(crtc->pipe)); 6369 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 6370 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 6371 6372 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 6373 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 6374 } 6375 6376 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 6377 struct intel_crtc_state *pipe_config) 6378 { 6379 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 6380 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 6381 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 6382 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 6383 6384 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 6385 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 6386 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 6387 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 6388 6389 mode->flags = pipe_config->base.adjusted_mode.flags; 6390 6391 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 6392 mode->flags |= pipe_config->base.adjusted_mode.flags; 6393 } 6394 6395 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 6396 { 6397 struct drm_device *dev = intel_crtc->base.dev; 6398 struct drm_i915_private *dev_priv = dev->dev_private; 6399 uint32_t pipeconf; 6400 6401 pipeconf = 0; 6402 6403 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 6404 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 6405 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & 
			PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen<4 and SDVO only support the field-indication interlace mode */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/*
 * Find PLL dividers for the requested port clock and fill in the DPLL
 * state for gmch platforms. Returns 0 on success, -EINVAL when no
 * divider setting can reach the target clock. DSI pipes need no DPLL.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;

	/* Scan the encoders bound to this crtc to learn the output types. */
	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != crtc)
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE. The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock. If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Dispatch to the platform-specific DPLL state computation. */
	if (IS_GEN2(dev)) {
		i8xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(crtc, crtc_state);
	} else {
		i9xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	}

	return 0;
}

/*
 * Read back the panel fitter state for gmch platforms, but only when the
 * fitter is enabled and actually attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* No pfit on 830 or non-mobile gen2/3 parts. */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

/*
 * Recover the port clock on Valleyview by reading the PLL dividers back
 * through DPIO and re-running the clock equation.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv
		   >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read out the framebuffer configuration the firmware/BIOS left enabled on
 * the primary plane, so the boot image can be inherited. Allocates a
 * struct intel_framebuffer describing it and hangs it off the crtc.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	/* Nothing to take over if the plane is off. */
	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiling = I915_TILING_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* gen4+ uses surface-relative addressing; earlier parts a flat base. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       plane_config->tiling);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	crtc->base.primary->fb = fb;
}

/*
 * Recover the port clock on Cherryview by reading the PLL dividers back
 * through DPIO and re-running the clock equation.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
	int refclk = 100000;

	mutex_lock(&dev_priv->dpio_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	mutex_unlock(&dev_priv->dpio_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	chv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read the full hardware state of a gmch pipe into @pipe_config.
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled, true when the config was populated.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier lives in DPLL_MD on gen4+, in DPLL on 945/G33. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}

/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) for
 * Ironlake-class platforms: compute the desired final state from the
 * connected outputs, then step the hardware towards it one source at a
 * time with the mandated settle delays.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	BUG_ON(val != final);
}

/* Pulse the FDI mPHY IOSFSB reset line and wait for both edges to latch. */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Register offsets and magic values below come straight from the
	 * workaround; pairs (0x2xxx / 0x21xx) program the two FDI lanes. */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Enforce the legal parameter combinations. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
		 with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->dpio_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	      SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->dpio_lock);

	/* LP PCH uses SBI_GEN0; non-LP uses SBI_DBUFF0. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	      SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Park the clock path before disabling the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Enable CLKOUT_DP (with spread and FDI programming) when any VGA output
 * is present, otherwise disable it.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga)
		lpt_enable_clkout_dp(dev, true, true);
	else
		lpt_disable_clkout_dp(dev);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Return the reference clock (in kHz) to use for this crtc: the VBT SSC
 * frequency for a lone SSC LVDS panel, 120 MHz otherwise.
 */
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	int num_connectors = 0;
	bool is_lvds = false;

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != to_intel_crtc(crtc))
			continue;

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/* Compute and write PIPECONF for Ironlake-class (PCH) pipes. */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/* Compute and write PIPECONF/PIPEMISC for Haswell-class transcoders. */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	/* Dither bits in PIPECONF exist on HSW only; BDW+ use PIPEMISC below. */
	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		/* Translate pipe_bpp into the PIPEMISC dither depth field. */
		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

/*
 * Compute DPLL divisors for the target port clock on ILK-class hardware.
 * On success *clock holds the divisors; when the output is LVDS and a
 * downclock is available, additionally try to find a matching reduced
 * clock and report it through *has_reduced_clock / *reduced_clock.
 * Returns false if no divisors satisfy the target clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);

	refclk = ironlake_get_refclk(crtc);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(intel_crtc, refclk);
	ret = dev_priv->display.find_dpll(limit, intel_crtc,
					  crtc_state->port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, intel_crtc,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}

/*
 * Return the number of FDI lanes needed to carry target_clock at the given
 * bits-per-pixel over a link running at link_bw.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20; /* +5% spread-spectrum margin */
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True if the computed M value is small enough to want CB tuning. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the DPLL control register value for an ILK-class PCH PLL from the
 * divisors already stored in crtc_state->dpll.  Also sets FP_CB_TUNE in
 * *fp (and *fp2 for the reduced clock, when given) if CB tuning is needed.
 * Returns the DPLL value with DPLL_VCO_ENABLE set.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	uint32_t dpll;
	int factor, num_connectors = 0;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the encoders feeding this crtc. */
	for_each_intel_encoder(dev, intel_encoder) {
		if (intel_encoder->new_crtc != to_intel_crtc(crtc))
			continue;

		switch (intel_encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Spread spectrum reference only for a single SSC-using LVDS link. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Compute the PLL state for an ILK-class crtc: find the divisors, fill in
 * crtc_state->dpll/dpll_hw_state and reserve a shared PCH DPLL when a PCH
 * encoder is present.  Returns 0 on success, -EINVAL if no PLL settings or
 * no free shared DPLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own.
	 */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		/* Without a reduced clock, FP1 mirrors FP0. */
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	/* LVDS downclocking is only usable with the powersave modparam. */
	if (is_lvds && has_reduced_clock && i915.powersave)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 (+TU size)
 * values for state readout/cross-check.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back the CPU transcoder M/N values.  On gen5+ the per-transcoder
 * M1/N1 registers are used (plus the M2/N2 set for DRRS on gen < 8);
 * older hardware uses the per-pipe G4X register layout.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * Read back the DP M/N values from whichever transcoder (PCH or CPU)
 * drives this crtc's DP output.
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI M/N configuration (no M2/N2 set for FDI). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/* Read back the SKL pipe scaler (PS) state into pch_pfit. */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PS_CTL(crtc->pipe));

	if (tmp & PS_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
	}
}

/*
 * Read back the primary plane state programmed by the BIOS/GOP on SKL+
 * (format, tiling, base, size, stride) and build a framebuffer describing
 * it so the boot display can be inherited.  On any inconsistency the
 * partially-filled fb is freed and plane_config is left without an fb.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	if (val & PLANE_CTL_TILED_MASK)
		plane_config->tiling = I915_TILING_X;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE stores (height-1) << 16 | (width-1). */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units of 64 bytes (linear) / 512 (X-tiled). */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	switch (plane_config->tiling) {
	case I915_TILING_NONE:
		stride_mult = 64;
		break;
	case I915_TILING_X:
		stride_mult = 512;
		break;
	default:
		MISSING_CASE(plane_config->tiling);
		goto error;
	}
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       plane_config->tiling);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	crtc->base.primary->fb = fb;
	return;

error:
	/* NOTE(review): frees via fb, relying on base being intel_fb's first
	 * member; kfree(intel_fb) would be clearer — confirm before changing. */
	kfree(fb);
}

/* Read back the ILK-style panel fitter (PF) state into pch_pfit. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
		 */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Read back the primary plane state programmed by the BIOS on ILK..BDW
 * (format, tiling, base, size, stride) and build a framebuffer describing
 * it so the boot display can be inherited.  Bails out silently if the
 * plane is disabled or the fb allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4)
		if (val & DISPPLANE_TILED)
			plane_config->tiling = I915_TILING_X;

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	/* Surface offset lives in a different register depending on gen/tiling. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC stores (width-1) << 16 | (height-1). */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       plane_config->tiling);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	crtc->base.primary->fb = fb;
}

/*
 * Read back the full hardware pipe configuration for an ILK-class crtc
 * into pipe_config.  Returns false when the pipe's power domain is off or
 * the pipe is disabled; true when pipe_config has been filled in.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe -> PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}

/*
 * Sanity-check (via I915_STATE_WARN) that everything which must be off
 * before LCPLL can be disabled really is off: all crtcs, the power well,
 * SPLL/WRPLLs, panel power, CPU/PCH backlight PWMs, the utility pin,
 * PCH GTC and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
		     pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* Read D_COMP from its HSW or BDW register location. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/*
 * Write D_COMP: on HSW this must go through the pcode mailbox (under the
 * rps hw_lock); on BDW it is a plain MMIO write with a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock to FCLK before shutting the PLL off. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the D_COMP comparator and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up (locked, enabled, on LCPLL, power-down denied)? */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable and force the D_COMP comparator. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK to LCPLL. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state.
 * We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LPT-LP: allow the PCH to drop into its low partition level. */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and DDI buffers. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

/*
 * HSW+ clock computation just delegates PLL selection to the DDI code;
 * there is no low-frequency (downclocked) mode here.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read back which DPLL drives the given DDI port on SKL and record it in
 * pipe_config (ddi_pll_sel plus shared_dpll, or the raw DPLL0 ctrl1 bits
 * for the eDP PLL which lives outside the shared-DPLL framework).
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

/* Read back which WRPLL (if any) clocks the given DDI port on HSW/BDW. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}
}

/*
 * Read back the DDI port wired to this crtc's transcoder, resolve its PLL,
 * and detect a PCH/FDI encoder (DDI E with the LPT transcoder enabled).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read back the full hardware pipe configuration for a HSW+ crtc into
 * pipe_config, resolving the eDP transcoder if it is routed to this pipe.
 * Returns false when the relevant power domains are off or the pipe is
 * disabled; true when pipe_config has been filled in.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fallthrough - treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_is_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	/* Panel fitter state is only readable while its power domain is up. */
	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (IS_SKYLAKE(dev))
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}

/*
 * Program the 845g/865g hardware cursor.  base == 0 means disable.  These
 * chips take a free-running stride and a packed size register, and only
 * accept base/size/stride changes while the cursor is off, so the cursor
 * is force-disabled first when those change while enabled.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (base) {
		unsigned int width = intel_crtc->cursor_width;
		unsigned int height = intel_crtc->cursor_height;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(_CURACNTR, 0);
		/* Posting read flushes the disable before reprogramming. */
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = 0;
	}

	/* Write each register only when its cached value changed. */
	if (intel_crtc->cursor_base != base) {
		I915_WRITE(_CURABASE, base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}

/*
 * Program the cursor on 9xx and newer (CURCNTR/CURBASE per pipe). Only
 * square 64/128/256 ARGB cursor modes are supported; the CURBASE write
 * commits the update on the next vblank. @base == 0 disables the cursor.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->cursor_width) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(intel_crtc->cursor_width);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang...
 */
/*
 * Recompute cursor position/visibility and hand off to the gen-specific
 * update routine. @on == false forces the cursor off (base = 0); a cursor
 * entirely outside the pipe source area is likewise disabled.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Fully off-screen to the right/bottom: disable. */
	if (x >= intel_crtc->config->pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config->pipe_src_h)
		base = 0;

	/* Negative coordinates use a sign bit plus magnitude in CURPOS. */
	if (x < 0) {
		if (x + intel_crtc->cursor_width <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->cursor_height <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	/* Nothing to do if the cursor stays disabled. */
	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* Point base at the last pixel for 180 degree rotation. */
		base += (intel_crtc->cursor_height *
			 intel_crtc->cursor_width - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
}

/*
 * Validate a requested cursor size against the per-platform hardware
 * limits. Returns true when @width x @height is programmable.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ?
			     64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/*
		 * width | height only hits a case label when width == height
		 * (both the same power of two), so this also enforces square
		 * cursors.
		 */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough - 64 is valid on all gens */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/*
 * Cache a gamma ramp (8 bits per channel, taken from the high byte of each
 * 16-bit input value) into the crtc's LUT and push it to the hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	/* Clamp the range to the 256-entry LUT. */
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj. Takes
 * ownership of the caller's reference on @obj: on any failure the
 * reference is dropped and an ERR_PTR is returned.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}

/*
 * Locked wrapper around __intel_framebuffer_create(); takes
 * dev->struct_mutex interruptibly for the duration of the create.
 */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

/* Byte pitch for @width pixels at @bpp bits per pixel, aligned to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to back @mode at @bpp bits per pixel. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/*
 * Allocate a GEM object sized for @mode and wrap it in a framebuffer.
 * Returns an ERR_PTR on allocation or init failure.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

/*
 * Return the fbdev framebuffer if it is large enough (pitch and total
 * size) to display @mode, otherwise NULL. Always NULL without fbdev
 * support compiled in.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}

/*
 * Light up a pipe for load-based connector detection. Reuses the
 * connector's current crtc if it has one, otherwise grabs an unused crtc
 * and a (possibly temporary) framebuffer and sets load_detect_mode (or
 * @mode) on it. State to undo the operation is recorded in @old for
 * intel_release_load_detect_pipe(). Returns true when a pipe is running
 * for the connector; on failure (including lock deadlock after retries
 * are exhausted via drm_modeset_backoff) returns false.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail_unlock;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail_unlock;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail_unlock;

		/* No temp state to tear down in this path. */
		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->enabled)
			continue;
		/* This can occur when applying the pipe A quirk on resume. */
		if (to_intel_crtc(possible_crtc)->new_enabled)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail_unlock;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail_unlock;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail_unlock;
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	/* Restore the crtc's staged enable state to match reality. */
	intel_crtc->new_enabled = crtc->enabled;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
fail_unlock:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/*
 * Undo intel_get_load_detect_pipe(): tear down any temporary pipe/fb
 * state recorded in @old, or restore the previous dpms mode when the
 * connector's existing crtc was reused.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;
		intel_set_mode(crtc, NULL, 0, 0, NULL);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);
}

/*
 * Return the DPLL reference clock in kHz for the platform / DPLL
 * configuration captured in @pipe_config.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 selects the alternate (downclocked) divisor set. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors; Pineview packs them differently. */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored as a one-hot field; ffs() recovers the divisor. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* Gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}

/*
 * Compute the pixel clock from the link clock and the M/N values,
 * keeping full 64-bit precision. Returns 0 when link_n is unset.
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

/*
 * Read back port_clock from the DPLL and derive crtc_clock from the FDI
 * link M/N values for PCH-attached (Ironlake-style) pipes.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe.
 */
/*
 * Reconstruct a drm_display_mode from the live pipe timing and DPLL
 * registers. Returns a kzalloc'ed mode the caller must free, or NULL on
 * allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Timing registers store value-minus-one; low/high halves are
	 * active and total respectively. */
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}

/*
 * Switch an LVDS pipe to its downclocked (FPA1) divisor when idle.
 * Only applies to GMCH platforms with a downclock available; called from
 * the idle path (see intel_mark_idle()).
 */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		/* Verify the bit stuck after the vblank. */
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}

/*
 * Mark the GPU busy: take a runtime-PM reference and refresh the
 * performance counters. Idempotent while already busy.
 */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	dev_priv->mm.busy = true;
}

/*
 * Mark the GPU idle: downclock active LVDS pipes (if powersave is on),
 * idle the RPS state on gen6+, and drop the runtime-PM reference taken
 * in intel_mark_busy().
 */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (!i915.powersave)
		goto out;

	for_each_crtc(dev, crtc) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

out:
	intel_runtime_pm_put(dev_priv);
}

/*
 * Replace the crtc's state object, freeing the old one. Note
 * crtc->base.state ends up pointing into @crtc_state, so passing NULL
 * also NULLs the base state (used from intel_crtc_destroy()).
 */
static void intel_crtc_set_state(struct intel_crtc *crtc,
				 struct intel_crtc_state *crtc_state)
{
	kfree(crtc->config);
	crtc->config = crtc_state;
	crtc->base.state = &crtc_state->base;
}

/* Tear down a crtc: cancel pending unpin work, free state and the crtc. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc
 *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach any pending unpin work under the event lock... */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* ...then cancel/free it outside the lock. */
	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	intel_crtc_set_state(intel_crtc, NULL);
	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred-work half of page-flip completion: unpin the old framebuffer,
 * drop the object references taken at flip-queue time, and signal the
 * frontbuffer-flip completion for the pipe.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb_obj);
	drm_gem_object_unreference(&work->pending_flip_obj->base);
	drm_gem_object_unreference(&work->old_fb_obj->base);

	intel_fbc_update(dev);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

/*
 * Complete a pending page flip on @crtc if its work item has reached the
 * COMPLETE state. Shared by the pipe- and plane-indexed entry points.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	page_flip_completed(intel_crtc);

	lockmgr(&dev->event_lock, LK_RELEASE);
}

/* Finish a page flip identified by pipe index. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Finish a page flip identified by plane index. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	/* Wrap-safe comparison: true while (a - b) has not wrapped negative. */
	return !((a - b) & 0x80000000);
}

/*
 * Decide whether the flip-done irq we just got corresponds to the flip
 * we queued, using both the live surface address and the flip counter.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* After a GPU reset any outstanding flip is considered done. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}

/*
 * Advance a pending flip from PENDING to COMPLETE when the hardware
 * reports it finished; called from the flip-done interrupt path.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/* Publish the queued flip's work item as PENDING with write barriers. */
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire.
	 */
	smp_wmb();
}

/*
 * Queue a gen2 CS page flip: wait for any previous flip on the plane,
 * then emit MI_DISPLAY_FLIP with the new surface offset.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * Queue a gen3 CS page flip. Same scheme as gen2 but uses the i915
 * MI_DISPLAY_FLIP opcode variant and a trailing NOOP instead of the aux
 * base dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * Queue a gen4/gen5 CS page flip; the tiling mode is OR'ed into the base
 * address dword and the PIPESRC value is carried in the last dword.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * Queue a gen6 CS page flip; tiling is carried in the pitch dword here,
 * unlike gen4 where it rides with the base address.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
9367 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 9368 */ 9369 pf = 0; 9370 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 9371 intel_ring_emit(ring, pf | pipesrc); 9372 9373 intel_mark_page_flip_active(intel_crtc); 9374 __intel_ring_advance(ring); 9375 return 0; 9376 } 9377 9378 static int intel_gen7_queue_flip(struct drm_device *dev, 9379 struct drm_crtc *crtc, 9380 struct drm_framebuffer *fb, 9381 struct drm_i915_gem_object *obj, 9382 struct intel_engine_cs *ring, 9383 uint32_t flags) 9384 { 9385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9386 uint32_t plane_bit = 0; 9387 int len, ret; 9388 9389 switch (intel_crtc->plane) { 9390 case PLANE_A: 9391 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 9392 break; 9393 case PLANE_B: 9394 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 9395 break; 9396 case PLANE_C: 9397 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 9398 break; 9399 default: 9400 WARN_ONCE(1, "unknown plane in flip command\n"); 9401 return -ENODEV; 9402 } 9403 9404 len = 4; 9405 if (ring->id == RCS) { 9406 len += 6; 9407 /* 9408 * On Gen 8, SRM is now taking an extra dword to accommodate 9409 * 48bits addresses, and we need a NOOP for the batch size to 9410 * stay even. 9411 */ 9412 if (IS_GEN8(dev)) 9413 len += 2; 9414 } 9415 9416 /* 9417 * BSpec MI_DISPLAY_FLIP for IVB: 9418 * "The full packet must be contained within the same cache line." 9419 * 9420 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 9421 * cacheline, if we ever start emitting more commands before 9422 * the MI_DISPLAY_FLIP we may need to first emit everything else, 9423 * then do the cacheline alignment, and finally emit the 9424 * MI_DISPLAY_FLIP. 9425 */ 9426 ret = intel_ring_cacheline_align(ring); 9427 if (ret) 9428 return ret; 9429 9430 ret = intel_ring_begin(ring, len); 9431 if (ret) 9432 return ret; 9433 9434 /* Unmask the flip-done completion message. 
Note that the bspec says that 9435 * we should do this for both the BCS and RCS, and that we must not unmask 9436 * more than one flip event at any time (or ensure that one flip message 9437 * can be sent by waiting for flip-done prior to queueing new flips). 9438 * Experimentation says that BCS works despite DERRMR masking all 9439 * flip-done completion events and that unmasking all planes at once 9440 * for the RCS also doesn't appear to drop events. Setting the DERRMR 9441 * to zero does lead to lockups within MI_DISPLAY_FLIP. 9442 */ 9443 if (ring->id == RCS) { 9444 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 9445 intel_ring_emit(ring, DERRMR); 9446 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 9447 DERRMR_PIPEB_PRI_FLIP_DONE | 9448 DERRMR_PIPEC_PRI_FLIP_DONE)); 9449 if (IS_GEN8(dev)) 9450 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | 9451 MI_SRM_LRM_GLOBAL_GTT); 9452 else 9453 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | 9454 MI_SRM_LRM_GLOBAL_GTT); 9455 intel_ring_emit(ring, DERRMR); 9456 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 9457 if (IS_GEN8(dev)) { 9458 intel_ring_emit(ring, 0); 9459 intel_ring_emit(ring, MI_NOOP); 9460 } 9461 } 9462 9463 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 9464 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 9465 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 9466 intel_ring_emit(ring, (MI_NOOP)); 9467 9468 intel_mark_page_flip_active(intel_crtc); 9469 __intel_ring_advance(ring); 9470 return 0; 9471 } 9472 9473 static bool use_mmio_flip(struct intel_engine_cs *ring, 9474 struct drm_i915_gem_object *obj) 9475 { 9476 /* 9477 * This is not being used for older platforms, because 9478 * non-availability of flip done interrupt forces us to use 9479 * CS flips. Older platforms derive flip done using some clever 9480 * tricks involving the flip_pending status bits and vblank irqs. 9481 * So using MMIO flips there would disrupt this mechanism. 
	 */

	/* No ring to wait on: MMIO flip is the only option. */
	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	/* Module parameter overrides (<0 forces CS, >0 forces MMIO);
	 * execlists mode implies MMIO flips; otherwise use MMIO only when
	 * the object's last read was on a different ring. */
	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
	else
		return ring != i915_gem_request_get_ring(obj->last_read_req);
}

/*
 * Perform an MMIO flip on SKL+ by rewriting the universal plane registers
 * (PLANE_CTL/PLANE_STRIDE/PLANE_SURF) for plane 0 of the crtc's pipe.
 * Assumes the caller has stashed the new surface offset in
 * intel_crtc->unpin_work->gtt_offset.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	const enum i915_pipe pipe = intel_crtc->pipe;
	u32 ctl, stride;

	/* Refresh only the tiling bits of PLANE_CTL. */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	if (obj->tiling_mode == I915_TILING_X)
		ctl |= PLANE_CTL_TILED_X;

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	stride = fb->pitches[0] >> 6;
	if (obj->tiling_mode == I915_TILING_X)
		stride = fb->pitches[0] >> 9;		/* X tiles are 512 bytes wide */

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* PLANE_SURF write latches everything; posting read flushes it. */
	I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/*
 * Perform an MMIO flip on ILK-style hardware: update the tiling bit in
 * DSPCNTR, then write DSPSURF, which latches the new surface address.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane),
		   intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));

}

/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	bool atomic_update;
	u32 start_vbl_count;

	intel_mark_page_flip_active(intel_crtc);

	/* Bracket the register writes so they land within one vblank. */
	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	if (INTEL_INFO(dev)->gen >= 9)
		skl_do_mmio_flip(intel_crtc);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(intel_crtc);

	if (atomic_update)
		intel_pipe_update_end(intel_crtc, start_vbl_count);
}

/*
 * Deferred-work handler for MMIO flips: wait for the request the flip
 * depends on (if any), perform the flip, then drop the request reference.
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_crtc *crtc =
		container_of(work, struct intel_crtc, mmio_flip.work);
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = &crtc->mmio_flip;
	if
(mmio_flip->req)
		/* Wait for rendering to the new fb to complete before flipping. */
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    crtc->reset_counter,
					    false, NULL, NULL) != 0);

	intel_do_mmio_flip(crtc);
	if (mmio_flip->req) {
		/* Request assignment requires struct_mutex. */
		mutex_lock(&crtc->base.dev->struct_mutex);
		i915_gem_request_assign(&mmio_flip->req, NULL);
		mutex_unlock(&crtc->base.dev->struct_mutex);
	}
}

/*
 * Queue an MMIO flip: record the request the flip must wait for (the
 * object's last write) and kick the work item that performs the actual
 * register writes.  @fb, @ring and @flags are unused here but required by
 * the queue_flip signature.  Always returns 0.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	i915_gem_request_assign(&intel_crtc->mmio_flip.req,
				obj->last_write_req);

	schedule_work(&intel_crtc->mmio_flip.work);

	return 0;
}

/*
 * Queue a CS page flip on gen9 (SKL) hardware.  Like gen7/8 on the render
 * ring, this needs the DERRMR unmask + SRM dance before MI_DISPLAY_FLIP.
 *
 * Returns 0 on success, -ENODEV for an unknown pipe/tiling mode, or a
 * negative error code on ring failures.
 */
static int intel_gen9_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane = 0, stride;
	int ret;

	/* NOTE(review): this switches on ->pipe (plane 1 of each pipe) but
	 * the WARN text says "plane" — message is slightly misleading. */
	switch (intel_crtc->pipe) {
	case PIPE_A:
		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
		break;
	case PIPE_B:
		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
		break;
	case PIPE_C:
		plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* Stride in 64-byte chunks (linear) or 512-byte X tiles. */
	switch (obj->tiling_mode) {
	case I915_TILING_NONE:
		stride = fb->pitches[0] >> 6;
		break;
	case I915_TILING_X:
		stride = fb->pitches[0] >> 9;
		break;
	default:
		WARN_ONCE(1, "unknown tiling in flip command\n");
		return -ENODEV;
	}

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	/* Unmask flip-done events in DERRMR, then SRM the register to
	 * scratch space so the write is flushed before the flip. */
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, DERRMR);
	intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
				DERRMR_PIPEB_PRI_FLIP_DONE |
				DERRMR_PIPEC_PRI_FLIP_DONE));
	intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
			      MI_SRM_LRM_GLOBAL_GTT);
	intel_ring_emit(ring, DERRMR);
	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
	intel_ring_emit(ring, 0);

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
	intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);

	return 0;
}

/* Fallback for platforms with no CS flip support. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *ring,
				    uint32_t flags)
{
	return -ENODEV;
}

/*
 * Heuristically detect a page flip whose completion interrupt was missed.
 * Returns true when the pending flip appears to have actually completed in
 * hardware (so the caller may complete it manually).
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		/* The flip can't have executed while its request is still
		 * outstanding; record the first vblank it was ready on. */
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_vblank_count(dev, intel_crtc->pipe);
	}

	/* Give the flip at least 3 vblanks before declaring a stall. */
	if (drm_vblank_count(dev, intel_crtc->pipe) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt.
	 */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

/*
 * Called from the vblank path to kick page flips whose completion interrupt
 * appears to have been missed (see __intel_pageflip_stall_check()).
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	/* NOTE(review): to_intel_crtc() is applied before the NULL check
	 * below; container_of() on NULL is pointer arithmetic on a null
	 * pointer — consider moving the check above this line. */
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
	}
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * drm page_flip entry point: validate the request, set up the unpin work
 * item, pin the new framebuffer and queue the flip either via MMIO or via
 * the per-platform CS queue_flip hook.
 *
 * Returns 0 on success or a negative error code (-EBUSY when a flip is
 * already pending, -EINVAL for incompatible framebuffers, etc.).
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb_obj = intel_fb_obj(old_fb);
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			lockmgr(&dev->event_lock, LK_RELEASE);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* Throttle: don't let more than two unpin works pile up. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Reference the objects for the scheduled work. */
	drm_gem_object_reference(&work->old_fb_obj->base);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;

	/* Pick the engine to emit the flip on, per platform. */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_read_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset =
		i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset;

	if (use_mmio_flip(ring, obj)) {
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					intel_ring_get_request(ring));
	}

	work->flip_queued_vblank = drm_vblank_count(dev, intel_crtc->pipe);
	work->enable_stall_check = true;

	i915_gem_track_fb(work->old_fb_obj, obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_fbc_disable(dev);
	intel_frontbuffer_flip_prepare(dev,
				       INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_unpin:
	intel_unpin_fb_obj(obj);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);
	drm_gem_object_unreference(&work->old_fb_obj->base);
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);

cleanup:
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
out_hang:
		/* GPU is wedged: restore the plane and fake a completion
		 * event so userspace doesn't hang waiting for it. */
		ret = intel_plane_restore(primary);
		if (ret == 0 && event) {
			lockmgr(&dev->event_lock, LK_EXCLUSIVE);
			drm_send_vblank_event(dev, pipe, event);
			lockmgr(&dev->event_lock, LK_RELEASE);
		}
	}
	return ret;
}

static struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
};

/**
 * intel_modeset_update_staged_output_state
 *
 * Updates the staged output configuration state, e.g. after we've read out the
 * current hw state.
 */
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	/* Mirror the live connector->encoder links into the staged state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	for_each_intel_encoder(dev, encoder) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = crtc->base.enabled;

		if (crtc->new_enabled)
			crtc->new_config = crtc->config;
		else
			crtc->new_config = NULL;
	}
}

/**
 * intel_modeset_commit_output_state
 *
 * This function copies the stage display pipe configuration to the real one.
 */
static void intel_modeset_commit_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		connector->base.encoder = &connector->new_encoder->base;
	}

	for_each_intel_encoder(dev, encoder) {
		encoder->base.crtc = &encoder->new_crtc->base;
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.enabled = crtc->new_enabled;
	}
}

/*
 * Clamp pipe_config->pipe_bpp to what the connected sink can accept,
 * based on the EDID-reported bpc (3 bpp per channel).
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

/*
 * Derive the starting pipe_bpp from the framebuffer pixel format, then clamp
 * it against every sink feeding from @crtc.  Returns the plane (source) bpp
 * on success — which may exceed the clamped pipe_bpp, signalling dithering —
 * or -EINVAL for unsupported formats.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_connector *connector;
	int bpp;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		bpp = 8*3; /* since we go through a colormap */
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen > 3))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_RGB565:
		bpp = 6*3; /* min is 18bpp */
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		/* fallthrough */
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		bpp = 8*3;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* checked in intel_framebuffer_init already */
		if (WARN_ON(INTEL_INFO(dev)->gen < 4))
			return -EINVAL;
		bpp = 10*3;
		break;
	/* TODO: gen4+ supports 16 bpc floating point, too. */
	default:
		DRM_DEBUG_KMS("unsupported depth\n");
		return -EINVAL;
	}

	pipe_config->pipe_bpp = bpp;

	/* Clamp display bpp to EDID value */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (!connector->new_encoder ||
		    connector->new_encoder->new_crtc != crtc)
			continue;

		connected_sink_compute_bpp(connector, pipe_config);
	}

	return bpp;
}

/* Dump the crtc-level timings of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/* Dump an entire pipe configuration to the KMS debug log for diagnostics. */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}

/* True when the two encoders may share one crtc (cloning). */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/* True when @encoder can be cloned with every other encoder staged on @crtc. */
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *source_encoder;

	for_each_intel_encoder(dev, source_encoder) {
		if (source_encoder->new_crtc != crtc)
			continue;

		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/* True when every pair of encoders staged on @crtc is mutually cloneable. */
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(crtc, encoder))
			return false;
	}

	return true;
}

/*
 * Verify that no digital port is claimed by more than one staged encoder.
 * Returns false on a conflict.
 */
static bool check_digital_port_conflicts(struct drm_device *dev)
{
	struct intel_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list, base.head) {
		struct intel_encoder *encoder = connector->new_encoder;

		if (!encoder)
			continue;

		WARN_ON(!encoder->new_crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: UNKNOWN is a DDI digital output */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
		default:
			break;
		}
	}

	return true;
}

/*
 * Compute a full pipe configuration for @crtc driving @mode with @fb.
 * Validates cloning/port constraints, seeds the config, then lets the
 * encoders and the crtc adjust it (retrying once on bandwidth constraints).
 *
 * Returns a newly allocated intel_crtc_state (caller frees) or an ERR_PTR.
 */
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_display_mode *mode)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config;
	int plane_bpp, ret = -EINVAL;
	bool retry = true;

	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	if (!check_digital_port_conflicts(dev)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return ERR_PTR(-ENOMEM);

	drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
	drm_mode_copy(&pipe_config->base.mode, mode);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones.
If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_intel_encoder(dev, encoder) {

		if (&encoder->new_crtc->base != crtc)
			continue;

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* A single retry is allowed when the crtc asks for reduced bandwidth;
	 * a second RETRY would mean the computation doesn't converge. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dither when the pipe couldn't sustain the full plane depth. */
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	kfree(pipe_config);
	return ERR_PTR(ret);
}

/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain).
 */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most. */
	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	/* Same for encoders whose crtc routing changed. */
	for_each_intel_encoder(dev, encoder) {
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for pipes that will be enabled/disabled ... */
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.enabled == intel_crtc->new_enabled)
			continue;

		if (!intel_crtc->new_enabled)
			*disable_pipes |= 1 << intel_crtc->pipe;
		else
			*prepare_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on life display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (intel_crtc->new_enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/*
	 * For simplicity do a full modeset on any pipe where the output routing
	 * changed. We could be more clever, but that would require us to be
	 * more careful with calling the relevant encoder->mode_set functions.
	 */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);

	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obies this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
	*modeset_pipes &= 1 << intel_crtc->pipe;
	*prepare_pipes &= 1 << intel_crtc->pipe;

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
}

/* True when at least one encoder is currently routed to @crtc. */
static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			return true;

	return false;
}

/*
 * Commit the staged output state and update derived software state (dpms,
 * connectors_active) for every pipe in @prepare_pipes.
 */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	intel_shared_dpll_commit(dev_priv);

	/* Mark encoders on the touched pipes inactive before committing. */
	for_each_intel_encoder(dev, intel_encoder) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

intel_modeset_commit_output_state(dev); 10476 10477 /* Double check state. */ 10478 for_each_intel_crtc(dev, intel_crtc) { 10479 WARN_ON(intel_crtc->base.enabled != intel_crtc_in_use(&intel_crtc->base)); 10480 WARN_ON(intel_crtc->new_config && 10481 intel_crtc->new_config != intel_crtc->config); 10482 WARN_ON(intel_crtc->base.enabled != !!intel_crtc->new_config); 10483 } 10484 10485 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 10486 if (!connector->encoder || !connector->encoder->crtc) 10487 continue; 10488 10489 intel_crtc = to_intel_crtc(connector->encoder->crtc); 10490 10491 if (prepare_pipes & (1 << intel_crtc->pipe)) { 10492 struct drm_property *dpms_property = 10493 dev->mode_config.dpms_property; 10494 10495 connector->dpms = DRM_MODE_DPMS_ON; 10496 drm_object_property_set_value(&connector->base, 10497 dpms_property, 10498 DRM_MODE_DPMS_ON); 10499 10500 intel_encoder = to_intel_encoder(connector->encoder); 10501 intel_encoder->connectors_active = true; 10502 } 10503 } 10504 10505 } 10506 10507 static bool intel_fuzzy_clock_check(int clock1, int clock2) 10508 { 10509 int diff; 10510 10511 if (clock1 == clock2) 10512 return true; 10513 10514 if (!clock1 || !clock2) 10515 return false; 10516 10517 diff = abs(clock1 - clock2); 10518 10519 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 10520 return true; 10521 10522 return false; 10523 } 10524 10525 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 10526 list_for_each_entry((intel_crtc), \ 10527 &(dev)->mode_config.crtc_list, \ 10528 base.head) \ 10529 if (mask & (1 <<(intel_crtc)->pipe)) 10530 10531 static bool 10532 intel_pipe_config_compare(struct drm_device *dev, 10533 struct intel_crtc_state *current_config, 10534 struct intel_crtc_state *pipe_config) 10535 { 10536 #define PIPE_CONF_CHECK_X(name) \ 10537 if (current_config->name != pipe_config->name) { \ 10538 DRM_ERROR("mismatch in " #name " " \ 10539 "(expected 0x%08x, found 0x%08x)\n", \ 10540 
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare one field, report mismatches in decimal. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
	if ((current_config->name != pipe_config->name) && \
	    (current_config->alt_name != pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i or %i, found %i)\n", \
			  current_config->name, \
			  current_config->alt_name, \
			  pipe_config->name); \
		return false; \
	}

/* Compare only the bits selected by @mask within a flags field. */
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* Compare clocks with intel_fuzzy_clock_check() tolerance. */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* True if either config carries the given quirk flag. */
#define PIPE_CONF_QUIRK(quirk) \
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(has_dp_encoder);

	if (INTEL_INFO(dev)->gen < 8) {
		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
		PIPE_CONF_CHECK_I(dp_m_n.link_m);
		PIPE_CONF_CHECK_I(dp_m_n.link_n);
		PIPE_CONF_CHECK_I(dp_m_n.tu);

		if (current_config->has_drrs) {
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
		}
	} else {
		/* BDW+ has a single M/N register set; hw state may match
		 * either the M1/N1 or the M2/N2 sw values. */
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
	}

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(pipe_src_w);
	PIPE_CONF_CHECK_I(pipe_src_h);

	/*
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
	 * screen. Since we don't yet re-compute the pipe config when moving
	 * just the lvds port away to another pipe the sw tracking won't match.
	 *
	 * Proper atomic modesets with recomputed global state will fix this.
	 * Until then just don't check gmch state for inherited modes.
	 */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
		PIPE_CONF_CHECK_I(gmch_pfit.control);
		/* pfit ratios are autocomputed by the hw on gen4+ */
		if (INTEL_INFO(dev)->gen < 4)
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	}

	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}

/*
 * Gen9+ (SKL) only: cross-check the sw-tracked DDB (display buffer)
 * allocation against the allocation read back from hw, per plane and per
 * cursor, logging any mismatches.
 */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum i915_pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.cursor[pipe];
		sw_entry = &sw_ddb->cursor[pipe];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start,
			  hw_entry->end);
	}
}

/*
 * Verify every connector's hw state against sw tracking (via
 * intel_connector_check_state()) and warn when a connector still has a
 * staged encoder differing from its current one after a commit.
 */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

/*
 * Cross-check each encoder's sw state (crtc link, connectors_active)
 * against the state derived from the connector list and against the hw
 * state reported by the encoder's ->get_hw_state callback.
 */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* Derive enabled/active from the connectors routed to us. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		/*
		 * for MST connectors if we unplug the connector is gone
		 * away but the encoder is still connected to a crtc
		 * until a modeset happens in response to the hotplug.
		 */
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		I915_STATE_WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		I915_STATE_WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		I915_STATE_WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

/*
 * Cross-check each crtc's sw state against the state derived from its
 * encoders and against the pipe config read back from hw; dumps both
 * configs when they disagree.
 */
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_state pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		I915_STATE_WARN(crtc->active && !crtc->base.enabled,
		     "active crtc, but not enabled in sw tracking\n");

		for_each_intel_encoder(dev, encoder) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		I915_STATE_WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		I915_STATE_WARN(enabled != crtc->base.enabled,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled, crtc->base.enabled);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->active;

		/* Let active encoders fill in their part of the hw config. */
		for_each_intel_encoder(dev, encoder) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		I915_STATE_WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, crtc->config,
					       "[sw state]");
		}
	}
}

/*
 * Cross-check each shared DPLL's refcounts and on/active tracking against
 * the crtcs using it and against the hw state from ->get_hw_state.
 */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users from the crtc list. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.enabled && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

/* Run the full suite of modeset state cross-checks. */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_wm_state(dev);
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}

/*
 * Warn (fuzzily) when the dotclock an encoder computed disagrees with the
 * one derived from the FDI link parameters.
 */
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}

/*
 * Compute crtc->scanline_offset, the platform-dependent correction between
 * the raw hw scanline counter and the logical scanline position.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = mode->crtc_vtotal;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/*
 * Compute the pipe masks for the requested mode set (via
 * intel_modeset_affected_pipes()) and, when a full modeset is needed,
 * compute the new pipe config for @crtc.
 *
 * Returns NULL when no modeset is required, an ERR_PTR on config failure,
 * or the newly-allocated pipe config (ownership passes to the caller).
 */
static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_framebuffer *fb,
			     unsigned *modeset_pipes,
			     unsigned *prepare_pipes,
			     unsigned *disable_pipes)
{
	struct intel_crtc_state *pipe_config = NULL;

	intel_modeset_affected_pipes(crtc, modeset_pipes,
				     prepare_pipes, disable_pipes);

	if ((*modeset_pipes) == 0)
		goto out;

	/*
	 * Note this needs changes when we start tracking multiple modes
	 * and crtcs. At that point we'll need to compute the whole config
	 * (i.e. one pipe_config for each crtc) rather than just the one
	 * for this crtc.
	 */
	pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
	if (IS_ERR(pipe_config)) {
		goto out;
	}
	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
			       "[modeset]");

out:
	return pipe_config;
}

/*
 * Reserve and compute the shared DPLL state for every pipe undergoing a
 * full modeset; aborts the whole DPLL transaction on the first failure.
 * No-op on platforms without a ->crtc_compute_clock hook.
 */
static int __intel_set_mode_setup_plls(struct drm_device *dev,
				       unsigned modeset_pipes,
				       unsigned disable_pipes)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned clear_pipes = modeset_pipes | disable_pipes;
	struct intel_crtc *intel_crtc;
	int ret = 0;

	if (!dev_priv->display.crtc_compute_clock)
		return 0;

	/* Release DPLL references held by pipes being reconfigured/off. */
	ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
	if (ret)
		goto done;

	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct intel_crtc_state *state = intel_crtc->new_config;
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   state);
		if (ret) {
			intel_shared_dpll_abort_config(dev_priv);
			goto done;
		}
	}

done:
	return ret;
}

/*
 * Core of the legacy (pre-atomic) mode set: sets up PLLs, disables the
 * affected pipes, commits the new sw state, updates planes and finally
 * re-enables the pipes. On failure the crtc's previous mode is restored
 * in sw (hw may be left partially updated).
 */
static int __intel_set_mode(struct drm_crtc *crtc,
			    struct drm_display_mode *mode,
			    int x, int y, struct drm_framebuffer *fb,
			    struct intel_crtc_state *pipe_config,
			    unsigned modeset_pipes,
			    unsigned prepare_pipes,
			    unsigned disable_pipes)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *saved_mode;
	struct intel_crtc *intel_crtc;
	int ret = 0;

	/* NOTE(review): M_WAITOK allocations should not fail on DragonFly;
	 * the NULL check below is kept as belt-and-suspenders. */
	saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK);
	if (!saved_mode)
		return -ENOMEM;

	*saved_mode = crtc->mode;

	if (modeset_pipes)
		to_intel_crtc(crtc)->new_config = pipe_config;

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off.
	 * We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (IS_VALLEYVIEW(dev)) {
		valleyview_modeset_global_pipes(dev, &prepare_pipes);

		/* may have added more to prepare_pipes than we should */
		prepare_pipes &= ~disable_pipes;
	}

	ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes);
	if (ret)
		goto done;

	/* Pipes going away are torn down completely ... */
	for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
		intel_crtc_disable(&intel_crtc->base);

	/* ... while pipes being reconfigured are only switched off. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		if (intel_crtc->base.enabled)
			dev_priv->display.crtc_disable(&intel_crtc->base);
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 *
	 * Note we'll need to fix this up when we start tracking multiple
	 * pipes; here we assume a single modeset_pipe and only track the
	 * single crtc and mode.
	 */
	if (modeset_pipes) {
		crtc->mode = *mode;
		/* mode_set/enable/disable functions rely on a correct pipe
		 * config. */
		intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(crtc,
						&pipe_config->base.adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(dev, prepare_pipes);

	modeset_update_crtc_power_domains(dev);

	/* Set up the DPLL and any encoders state that needs to adjust or depend
	 * on the DPLL.
	 */
	for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
		struct drm_plane *primary = intel_crtc->base.primary;
		int vdisplay, hdisplay;

		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
		ret = primary->funcs->update_plane(primary, &intel_crtc->base,
						   fb, 0, 0,
						   hdisplay, vdisplay,
						   x << 16, y << 16,
						   hdisplay << 16, vdisplay << 16);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
		update_scanline_offset(intel_crtc);

		dev_priv->display.crtc_enable(&intel_crtc->base);
	}

	/* FIXME: add subpixel order */
done:
	/* On failure restore the sw-tracked mode of a still-enabled crtc. */
	if (ret && crtc->enabled)
		crtc->mode = *saved_mode;

	kfree(saved_mode);
	return ret;
}

/*
 * Wrapper around __intel_set_mode() that runs the full state checker
 * afterwards when the mode set succeeded.
 */
static int intel_set_mode_pipes(struct drm_crtc *crtc,
				struct drm_display_mode *mode,
				int x, int y, struct drm_framebuffer *fb,
				struct intel_crtc_state *pipe_config,
				unsigned modeset_pipes,
				unsigned prepare_pipes,
				unsigned disable_pipes)
{
	int ret;

	ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
			       prepare_pipes, disable_pipes);

	if (ret == 0)
		intel_modeset_check_state(crtc->dev);

	return ret;
}

/*
 * Top-level legacy mode set entry: compute the pipe masks and config,
 * then apply them.
 */
static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_display_mode *mode,
			  int x, int y, struct drm_framebuffer *fb)
{
	struct intel_crtc_state *pipe_config;
	unsigned modeset_pipes, prepare_pipes, disable_pipes;

	pipe_config = intel_modeset_compute_config(crtc, mode, fb,
						   &modeset_pipes,
						   &prepare_pipes,
						   &disable_pipes);

	if (IS_ERR(pipe_config))
		return PTR_ERR(pipe_config);

	return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
				    modeset_pipes, prepare_pipes,
				    disable_pipes);
}

/* Re-apply the crtc's current mode/fb/position (e.g. after suspend). */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
}

#undef for_each_intel_crtc_masked

/* Free an intel_set_config and the state snapshots it owns. */
static void intel_set_config_free(struct intel_set_config *config)
{
	if (!config)
		return;

	kfree(config->save_connector_encoders);
	kfree(config->save_encoder_crtcs);
	kfree(config->save_crtc_enabled);
	kfree(config);
}

/*
 * Snapshot the current crtc-enabled flags and encoder/connector routing
 * into @config so it can be restored if the set_config call fails.
 * Returns -ENOMEM on allocation failure (partial allocations are freed by
 * intel_set_config_free() in the caller's error path).
 */
static int intel_set_config_save_state(struct drm_device *dev,
				       struct intel_set_config *config)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	int count;

	config->save_crtc_enabled =
		kcalloc(dev->mode_config.num_crtc,
			sizeof(bool), GFP_KERNEL);
	if (!config->save_crtc_enabled)
		return -ENOMEM;

	config->save_encoder_crtcs =
		kcalloc(dev->mode_config.num_encoder,
			sizeof(struct drm_crtc *), GFP_KERNEL);
	if (!config->save_encoder_crtcs)
		return -ENOMEM;

	config->save_connector_encoders =
		kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_encoder *), GFP_KERNEL);
	if (!config->save_connector_encoders)
		return -ENOMEM;

	/* Copy data. Note that driver private data is not affected.
	 * Should anything bad happen only the expected state is
	 * restored, not the driver's personal bookkeeping.
	 */
	count = 0;
	for_each_crtc(dev, crtc) {
		config->save_crtc_enabled[count++] = crtc->enabled;
	}

	count = 0;
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		config->save_encoder_crtcs[count++] = encoder->crtc;
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		config->save_connector_encoders[count++] = connector->encoder;
	}

	return 0;
}

/*
 * Restore the staged (->new_*) state from the snapshot taken by
 * intel_set_config_save_state(), used to back out of a failed
 * set_config attempt. Relies on list iteration order being identical
 * to the order used when saving.
 */
static void intel_set_config_restore_state(struct drm_device *dev,
					   struct intel_set_config *config)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int count;

	count = 0;
	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = config->save_crtc_enabled[count++];

		if (crtc->new_enabled)
			crtc->new_config = crtc->config;
		else
			crtc->new_config = NULL;
	}

	count = 0;
	for_each_intel_encoder(dev, encoder) {
		encoder->new_crtc =
			to_intel_crtc(config->save_encoder_crtcs[count++]);
	}

	count = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) {
		connector->new_encoder =
			to_intel_encoder(config->save_connector_encoders[count++]);
	}
}

/*
 * Returns true when the requested set_config leaves at least one
 * connector on @set->crtc in a non-ON DPMS state, i.e. the crtc is
 * effectively being turned off for that connector.
 */
static bool
is_crtc_connector_off(struct drm_mode_set *set)
{
	int i;

	if (set->num_connectors == 0)
		return false;

	if (WARN_ON(set->connectors == NULL))
		return false;

	for (i = 0; i < set->num_connectors; i++)
		if (set->connectors[i]->encoder &&
		    set->connectors[i]->encoder->crtc == set->crtc &&
		    set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
			return true;

	return false;
}

/*
 * Classify the requested set_config as either a full mode set
 * (config->mode_changed) or a plain framebuffer flip/move
 * (config->fb_changed), based on fb, format, position and mode diffs.
 */
static void
intel_set_config_compute_mode_changes(struct drm_mode_set *set,
				      struct intel_set_config *config)
{

	/* We should be able to check here if the fb has the same properties
	 * and then just flip_or_move it */
	if (is_crtc_connector_off(set)) {
		config->mode_changed = true;
	} else if (set->crtc->primary->fb != set->fb) {
		/*
		 * If we have no fb, we can only flip as long as the crtc is
		 * active, otherwise we need a full mode set. The crtc may
		 * be active if we've only disabled the primary plane, or
		 * in fastboot situations.
		 */
		if (set->crtc->primary->fb == NULL) {
			struct intel_crtc *intel_crtc =
				to_intel_crtc(set->crtc);

			if (intel_crtc->active) {
				DRM_DEBUG_KMS("crtc has no fb, will flip\n");
				config->fb_changed = true;
			} else {
				DRM_DEBUG_KMS("inactive crtc, full mode set\n");
				config->mode_changed = true;
			}
		} else if (set->fb == NULL) {
			config->mode_changed = true;
		} else if (set->fb->pixel_format !=
			   set->crtc->primary->fb->pixel_format) {
			/* Format change requires reprogramming the plane. */
			config->mode_changed = true;
		} else {
			config->fb_changed = true;
		}
	}

	/* Panning only moves the scanout offset within the same fb. */
	if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
		config->fb_changed = true;

	if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
		DRM_DEBUG_KMS("modes are different, full mode set\n");
		drm_mode_debug_printmodeline(&set->crtc->mode);
		drm_mode_debug_printmodeline(set->mode);
		config->mode_changed = true;
	}

	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
			set->crtc->base.id, config->mode_changed, config->fb_changed);
}

/*
 * Translate the drm_mode_set request into staged (->new_*)
 * connector/encoder/crtc links. (Body continues below.)
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct intel_set_config *config)
{
	struct intel_connector *connector;
	struct intel_encoder *encoder;
	struct intel_crtc *crtc;
	int ro;

	/* The upper layers
ensure that we either disable a crtc or have a list 11416 * of connectors. For paranoia, double-check this. */ 11417 WARN_ON(!set->fb && (set->num_connectors != 0)); 11418 WARN_ON(set->fb && (set->num_connectors == 0)); 11419 11420 list_for_each_entry(connector, &dev->mode_config.connector_list, 11421 base.head) { 11422 /* Otherwise traverse passed in connector list and get encoders 11423 * for them. */ 11424 for (ro = 0; ro < set->num_connectors; ro++) { 11425 if (set->connectors[ro] == &connector->base) { 11426 connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe); 11427 break; 11428 } 11429 } 11430 11431 /* If we disable the crtc, disable all its connectors. Also, if 11432 * the connector is on the changing crtc but not on the new 11433 * connector list, disable it. */ 11434 if ((!set->fb || ro == set->num_connectors) && 11435 connector->base.encoder && 11436 connector->base.encoder->crtc == set->crtc) { 11437 connector->new_encoder = NULL; 11438 11439 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 11440 connector->base.base.id, 11441 connector->base.name); 11442 } 11443 11444 11445 if (&connector->new_encoder->base != connector->base.encoder) { 11446 DRM_DEBUG_KMS("encoder changed, full mode switch\n"); 11447 config->mode_changed = true; 11448 } 11449 } 11450 /* connector->new_encoder is now updated for all connectors. */ 11451 11452 /* Update crtc of enabled connectors. 
*/ 11453 list_for_each_entry(connector, &dev->mode_config.connector_list, 11454 base.head) { 11455 struct drm_crtc *new_crtc; 11456 11457 if (!connector->new_encoder) 11458 continue; 11459 11460 new_crtc = connector->new_encoder->base.crtc; 11461 11462 for (ro = 0; ro < set->num_connectors; ro++) { 11463 if (set->connectors[ro] == &connector->base) 11464 new_crtc = set->crtc; 11465 } 11466 11467 /* Make sure the new CRTC will work with the encoder */ 11468 if (!drm_encoder_crtc_ok(&connector->new_encoder->base, 11469 new_crtc)) { 11470 return -EINVAL; 11471 } 11472 connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); 11473 11474 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 11475 connector->base.base.id, 11476 connector->base.name, 11477 new_crtc->base.id); 11478 } 11479 11480 /* Check for any encoders that needs to be disabled. */ 11481 for_each_intel_encoder(dev, encoder) { 11482 int num_connectors = 0; 11483 list_for_each_entry(connector, 11484 &dev->mode_config.connector_list, 11485 base.head) { 11486 if (connector->new_encoder == encoder) { 11487 WARN_ON(!connector->new_encoder->new_crtc); 11488 num_connectors++; 11489 } 11490 } 11491 11492 if (num_connectors == 0) 11493 encoder->new_crtc = NULL; 11494 else if (num_connectors > 1) 11495 return -EINVAL; 11496 11497 /* Only now check for crtc changes so we don't miss encoders 11498 * that will be disabled. */ 11499 if (&encoder->new_crtc->base != encoder->base.crtc) { 11500 DRM_DEBUG_KMS("crtc changed, full mode switch\n"); 11501 config->mode_changed = true; 11502 } 11503 } 11504 /* Now we've also updated encoder->new_crtc for all encoders. 
*/ 11505 list_for_each_entry(connector, &dev->mode_config.connector_list, 11506 base.head) { 11507 if (connector->new_encoder) 11508 if (connector->new_encoder != connector->encoder) 11509 connector->encoder = connector->new_encoder; 11510 } 11511 for_each_intel_crtc(dev, crtc) { 11512 crtc->new_enabled = false; 11513 11514 for_each_intel_encoder(dev, encoder) { 11515 if (encoder->new_crtc == crtc) { 11516 crtc->new_enabled = true; 11517 break; 11518 } 11519 } 11520 11521 if (crtc->new_enabled != crtc->base.enabled) { 11522 DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", 11523 crtc->new_enabled ? "en" : "dis"); 11524 config->mode_changed = true; 11525 } 11526 11527 if (crtc->new_enabled) 11528 crtc->new_config = crtc->config; 11529 else 11530 crtc->new_config = NULL; 11531 } 11532 11533 return 0; 11534 } 11535 11536 static void disable_crtc_nofb(struct intel_crtc *crtc) 11537 { 11538 struct drm_device *dev = crtc->base.dev; 11539 struct intel_encoder *encoder; 11540 struct intel_connector *connector; 11541 11542 DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", 11543 pipe_name(crtc->pipe)); 11544 11545 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { 11546 if (connector->new_encoder && 11547 connector->new_encoder->new_crtc == crtc) 11548 connector->new_encoder = NULL; 11549 } 11550 11551 for_each_intel_encoder(dev, encoder) { 11552 if (encoder->new_crtc == crtc) 11553 encoder->new_crtc = NULL; 11554 } 11555 11556 crtc->new_enabled = false; 11557 crtc->new_config = NULL; 11558 } 11559 11560 static int intel_crtc_set_config(struct drm_mode_set *set) 11561 { 11562 struct drm_device *dev; 11563 struct drm_mode_set save_set; 11564 struct intel_set_config *config; 11565 struct intel_crtc_state *pipe_config; 11566 unsigned modeset_pipes, prepare_pipes, disable_pipes; 11567 int ret; 11568 11569 BUG_ON(!set); 11570 BUG_ON(!set->crtc); 11571 BUG_ON(!set->crtc->helper_private); 11572 11573 /* Enforce sane interface api - has 
been abused by the fb helper. */ 11574 BUG_ON(!set->mode && set->fb); 11575 BUG_ON(set->fb && set->num_connectors == 0); 11576 11577 if (set->fb) { 11578 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 11579 set->crtc->base.id, set->fb->base.id, 11580 (int)set->num_connectors, set->x, set->y); 11581 } else { 11582 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 11583 } 11584 11585 dev = set->crtc->dev; 11586 11587 ret = -ENOMEM; 11588 config = kzalloc(sizeof(*config), GFP_KERNEL); 11589 if (!config) 11590 goto out_config; 11591 11592 ret = intel_set_config_save_state(dev, config); 11593 if (ret) 11594 goto out_config; 11595 11596 save_set.crtc = set->crtc; 11597 save_set.mode = &set->crtc->mode; 11598 save_set.x = set->crtc->x; 11599 save_set.y = set->crtc->y; 11600 save_set.fb = set->crtc->primary->fb; 11601 11602 /* Compute whether we need a full modeset, only an fb base update or no 11603 * change at all. In the future we might also check whether only the 11604 * mode changed, e.g. for LVDS where we only change the panel fitter in 11605 * such cases. */ 11606 intel_set_config_compute_mode_changes(set, config); 11607 11608 ret = intel_modeset_stage_output_state(dev, set, config); 11609 if (ret) 11610 goto fail; 11611 11612 pipe_config = intel_modeset_compute_config(set->crtc, set->mode, 11613 set->fb, 11614 &modeset_pipes, 11615 &prepare_pipes, 11616 &disable_pipes); 11617 if (IS_ERR(pipe_config)) { 11618 ret = PTR_ERR(pipe_config); 11619 goto fail; 11620 } else if (pipe_config) { 11621 if (pipe_config->has_audio != 11622 to_intel_crtc(set->crtc)->config->has_audio) 11623 config->mode_changed = true; 11624 11625 /* 11626 * Note we have an issue here with infoframes: current code 11627 * only updates them on the full mode set path per hw 11628 * requirements. So here we should be checking for any 11629 * required changes and forcing a mode set. 
11630 */ 11631 } 11632 11633 /* set_mode will free it in the mode_changed case */ 11634 if (!config->mode_changed) 11635 kfree(pipe_config); 11636 11637 intel_update_pipe_size(to_intel_crtc(set->crtc)); 11638 11639 if (config->mode_changed) { 11640 ret = intel_set_mode_pipes(set->crtc, set->mode, 11641 set->x, set->y, set->fb, pipe_config, 11642 modeset_pipes, prepare_pipes, 11643 disable_pipes); 11644 } else if (config->fb_changed) { 11645 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 11646 struct drm_plane *primary = set->crtc->primary; 11647 int vdisplay, hdisplay; 11648 11649 drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); 11650 ret = primary->funcs->update_plane(primary, set->crtc, set->fb, 11651 0, 0, hdisplay, vdisplay, 11652 set->x << 16, set->y << 16, 11653 hdisplay << 16, vdisplay << 16); 11654 11655 /* 11656 * We need to make sure the primary plane is re-enabled if it 11657 * has previously been turned off. 11658 */ 11659 if (!intel_crtc->primary_enabled && ret == 0) { 11660 WARN_ON(!intel_crtc->active); 11661 intel_enable_primary_hw_plane(set->crtc->primary, set->crtc); 11662 } 11663 11664 /* 11665 * In the fastboot case this may be our only check of the 11666 * state after boot. It would be better to only do it on 11667 * the first update, but we don't have a nice way of doing that 11668 * (and really, set_config isn't used much for high freq page 11669 * flipping, so increasing its cost here shouldn't be a big 11670 * deal). 11671 */ 11672 if (i915.fastboot && ret == 0) 11673 intel_modeset_check_state(set->crtc->dev); 11674 } 11675 11676 if (ret) { 11677 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n", 11678 set->crtc->base.id, ret); 11679 fail: 11680 intel_set_config_restore_state(dev, config); 11681 11682 /* 11683 * HACK: if the pipe was on, but we didn't have a framebuffer, 11684 * force the pipe off to avoid oopsing in the modeset code 11685 * due to fb==NULL. 
This should only happen during boot since 11686 * we don't yet reconstruct the FB from the hardware state. 11687 */ 11688 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb) 11689 disable_crtc_nofb(to_intel_crtc(save_set.crtc)); 11690 11691 /* Try to restore the config */ 11692 if (config->mode_changed && 11693 intel_set_mode(save_set.crtc, save_set.mode, 11694 save_set.x, save_set.y, save_set.fb)) 11695 DRM_ERROR("failed to restore config after modeset failure\n"); 11696 } 11697 11698 out_config: 11699 intel_set_config_free(config); 11700 return ret; 11701 } 11702 11703 static const struct drm_crtc_funcs intel_crtc_funcs = { 11704 .gamma_set = intel_crtc_gamma_set, 11705 .set_config = intel_crtc_set_config, 11706 .destroy = intel_crtc_destroy, 11707 .page_flip = intel_crtc_page_flip, 11708 .atomic_duplicate_state = intel_crtc_duplicate_state, 11709 .atomic_destroy_state = intel_crtc_destroy_state, 11710 }; 11711 11712 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 11713 struct intel_shared_dpll *pll, 11714 struct intel_dpll_hw_state *hw_state) 11715 { 11716 uint32_t val; 11717 11718 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 11719 return false; 11720 11721 val = I915_READ(PCH_DPLL(pll->id)); 11722 hw_state->dpll = val; 11723 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 11724 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 11725 11726 return val & DPLL_VCO_ENABLE; 11727 } 11728 11729 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, 11730 struct intel_shared_dpll *pll) 11731 { 11732 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); 11733 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); 11734 } 11735 11736 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 11737 struct intel_shared_dpll *pll) 11738 { 11739 /* PCH refclock must be enabled first */ 11740 ibx_assert_pch_refclk_enabled(dev_priv); 11741 11742 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Disable a PCH DPLL after verifying nothing still depends on it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Debug names for the two fixed PCH DPLLs, indexed by pll->id. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

/* Register the two IBX/CPT PCH DPLLs with the shared-DPLL framework. */
static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

/* Pick the platform-appropriate shared-DPLL implementation. */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @fb: framebuffer to prepare for presentation
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer
 * tracking bits.  Some older platforms need special physical address
 * handling for cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_framebuffer *fb)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum i915_pipe pipe = intel_plane->pipe;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	unsigned frontbuffer_bits = 0;
	int ret = 0;

	/* No fb -> plane is being disabled, nothing to pin. */
	if (!obj)
		return 0;

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
		break;
	}

	mutex_lock(&dev->struct_mutex);

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* i830 requires 16KiB alignment for phys cursors. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
	}

	/* Only move frontbuffer tracking over once the pin succeeded. */
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @fb: old framebuffer that was on plane
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 * Physically-attached cursor objects were never GTT-pinned, so only
 * unpin in the non-phys case.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_framebuffer *fb)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);

	if (WARN_ON(!obj))
		return;

	if (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical) {
		mutex_lock(&dev->struct_mutex);
		intel_unpin_fb_obj(obj);
		mutex_unlock(&dev->struct_mutex);
	}
}

/*
 * Check-phase handler for the primary plane: validates the update (no
 * scaling allowed) and records which follow-up actions the commit phase
 * must take in intel_crtc->atomic.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = state->base.crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	int ret;

	/* state->base.crtc may be NULL when disabling; fall back to the
	 * plane's current crtc. */
	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    false, true, &state->visible);
	if (ret)
		return ret;

	if (intel_crtc->active) {
		intel_crtc->atomic.wait_for_flips = true;

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */
		if (intel_crtc->primary_enabled &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.plane == intel_crtc->plane &&
		    state->base.rotation != BIT(DRM_ROTATE_0)) {
			intel_crtc->atomic.disable_fbc = true;
		}

		if (state->visible) {
			/*
			 * BDW signals flip done immediately if the plane
			 * is disabled, even if the plane enable is already
			 * armed to occur at the next vblank :(
			 */
			if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
				intel_crtc->atomic.wait_vblank = true;
		}

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);

		intel_crtc->atomic.update_fbc = true;
	}

	return 0;
}

/*
 * Commit-phase handler for the primary plane: applies the already
 * validated state to hardware (or disables the plane when clipped away).
 */
static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_rect *src = &state->src;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = fb;
	/* src coordinates are 16.16 fixed point; keep the integer part. */
	crtc->x = src->x1 >> 16;
	crtc->y = src->y1 >> 16;

	intel_plane->obj = obj;

	if (intel_crtc->active) {
		if (state->visible) {
			/* FIXME: kill this fastboot hack */
			intel_update_pipe_size(intel_crtc);

			intel_crtc->primary_enabled = true;

			dev_priv->display.update_primary_plane(crtc, plane->fb,
					crtc->x, crtc->y);
		} else {
			/*
			 * If clipping results in a non-visible primary plane,
			 * we'll disable the primary plane.  Note that this is
			 * a bit different than what happens if userspace
			 * explicitly disables the plane by passing fb=0
			 * because plane->fb still gets set and pinned.
			 */
			intel_disable_primary_hw_plane(plane, crtc);
		}
	}
}

/*
 * Pre-commit CRTC hook: performs the deferred work recorded by the plane
 * check handlers (frontbuffer tracking for disabled planes, flip waits,
 * FBC disable, watermarks) and starts vblank evasion for the commit.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *p;
	unsigned fb_bits = 0;

	/* Track fb's for any planes being disabled */
	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(p);

		if (intel_crtc->atomic.disabled_planes &
		    (1 << drm_plane_index(p))) {
			switch (p->type) {
			case DRM_PLANE_TYPE_PRIMARY:
				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_CURSOR:
				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
				break;
			}

			mutex_lock(&dev->struct_mutex);
			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	if (intel_crtc->atomic.wait_for_flips)
		intel_crtc_wait_for_pending_flips(crtc);

	if (intel_crtc->atomic.disable_fbc)
		intel_fbc_disable(dev);

	if (intel_crtc->atomic.pre_disable_primary)
		intel_pre_disable_primary(crtc);

	if (intel_crtc->atomic.update_wm)
		intel_update_watermarks(crtc);

	intel_runtime_pm_get(dev_priv);

	/* Perform vblank evasion around commit operation */
	if (intel_crtc->active)
		intel_crtc->atomic.evade =
			intel_pipe_update_start(intel_crtc,
					&intel_crtc->atomic.start_vbl_count);
}

/*
 * Post-commit CRTC hook: ends vblank evasion, flushes frontbuffer
 * tracking and performs the deferred post-commit work, then clears the
 * per-commit atomic scratch state.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;

	if (intel_crtc->atomic.evade)
		intel_pipe_update_end(intel_crtc,
				      intel_crtc->atomic.start_vbl_count);

	intel_runtime_pm_put(dev_priv);

	if (intel_crtc->atomic.wait_vblank)
		intel_wait_for_vblank(dev, intel_crtc->pipe);

	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);

	if (intel_crtc->atomic.update_fbc) {
		mutex_lock(&dev->struct_mutex);
		intel_fbc_update(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	if (intel_crtc->atomic.post_enable_primary)
		intel_post_enable_primary(crtc);

	/* NOTE(review): disabled_planes above is tested with
	 * (1 << drm_plane_index(p)), but update_sprite_watermarks is tested
	 * with the bare plane index here.  If update_sprite_watermarks is a
	 * bitmask this looks like a missing shift -- verify against where
	 * the field is set. */
	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
						       false, false);

	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

/* Plane entry points shared by primary, cursor and sprite planes. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_plane_helper_update,
	.disable_plane = drm_plane_helper_disable,
	.destroy = intel_plane_destroy,
	.set_property = intel_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate and register the primary plane for @pipe.  Returns the new
 * plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	/* gen2/3 with FBC swap pipe/plane; see intel_crtc_init(). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	/* 180 degree rotation is only supported on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&primary->base.base,
					dev->mode_config.rotation_property,
					state->base.rotation);
	}

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}

/*
 * Check-phase handler for the cursor plane: validates size, stride and
 * tiling constraints and records deferred work for the commit phase.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc;
	unsigned stride;
	int ret;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;


	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		goto finish;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Cursor stride is width rounded to a power of two, ARGB (4 bpp). */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
	if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}
	mutex_unlock(&dev->struct_mutex);

finish:
	/* ret is still 0 here when jumping from the !obj (disable) case. */
	if (intel_crtc->active) {
		if (intel_crtc->cursor_width != state->base.crtc_w)
			intel_crtc->atomic.update_wm = true;

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
	}

	return ret;
}

/*
 * Commit-phase handler for the cursor plane: resolves the cursor address
 * (GTT offset or physical bus address) and updates the hardware cursor.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = state->base.fb;
	crtc->cursor_x = state->base.crtc_x;
	crtc->cursor_y = state->base.crtc_y;

	intel_plane->obj = obj;

	/* Same bo as before: only position/size may have changed. */
	if (intel_crtc->cursor_bo == obj)
		goto update;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
update:
	intel_crtc->cursor_width = state->base.crtc_w;
	intel_crtc->cursor_height = state->base.crtc_h;

	if (intel_crtc->active)
		intel_crtc_update_cursor(crtc, state->visible);
}

/*
 * Allocate and register the cursor plane for @pipe.  Returns the new
 * plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
					dev->mode_config.rotation_property,
					state->base.rotation);
	}

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}

/*
 * Allocate and register a CRTC (with its primary and cursor planes) for
 * @pipe.  On any failure everything allocated so far is torn down and the
 * function returns silently -- the pipe simply won't be exposed.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc_set_state(intel_crtc, crtc_state);

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Start with an identity gamma LUT. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 means "unknown", forcing a write on first cursor update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}

/*
 * Resolve the pipe a connector is currently driven by.  Requires the
 * connection_mutex; returns INVALID_PIPE when the connector has no
 * active encoder/crtc.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/* DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: map a crtc id to its pipe. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build the possible_clones bitmask for @encoder: one bit per encoder
 * (in list order) that can share a crtc with it.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* Detect whether eDP on port A is present and not fused off. */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Whether the platform has a VGA (CRT) connector at all. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/* Probe and register all output encoders/connectors for the platform. */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/* Haswell uses DDI functions to detect digital outputs */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* DDI A only supports eDP */
		if (found)
intel_ddi_init(dev, PORT_A); 12510 12511 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 12512 * register */ 12513 found = I915_READ(SFUSE_STRAP); 12514 12515 if (found & SFUSE_STRAP_DDIB_DETECTED) 12516 intel_ddi_init(dev, PORT_B); 12517 if (found & SFUSE_STRAP_DDIC_DETECTED) 12518 intel_ddi_init(dev, PORT_C); 12519 if (found & SFUSE_STRAP_DDID_DETECTED) 12520 intel_ddi_init(dev, PORT_D); 12521 } else if (HAS_PCH_SPLIT(dev)) { 12522 int found; 12523 dpd_is_edp = intel_dp_is_edp(dev, PORT_D); 12524 12525 if (has_edp_a(dev)) 12526 intel_dp_init(dev, DP_A, PORT_A); 12527 12528 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 12529 /* PCH SDVOB multiplex with HDMIB */ 12530 found = intel_sdvo_init(dev, PCH_SDVOB, true); 12531 if (!found) 12532 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 12533 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 12534 intel_dp_init(dev, PCH_DP_B, PORT_B); 12535 } 12536 12537 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 12538 intel_hdmi_init(dev, PCH_HDMIC, PORT_C); 12539 12540 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 12541 intel_hdmi_init(dev, PCH_HDMID, PORT_D); 12542 12543 if (I915_READ(PCH_DP_C) & DP_DETECTED) 12544 intel_dp_init(dev, PCH_DP_C, PORT_C); 12545 12546 if (I915_READ(PCH_DP_D) & DP_DETECTED) 12547 intel_dp_init(dev, PCH_DP_D, PORT_D); 12548 } else if (IS_VALLEYVIEW(dev)) { 12549 /* 12550 * The DP_DETECTED bit is the latched state of the DDC 12551 * SDA pin at boot. However since eDP doesn't require DDC 12552 * (no way to plug in a DP->HDMI dongle) the DDC pins for 12553 * eDP ports may have been muxed to an alternate function. 12554 * Thus we can't rely on the DP_DETECTED bit alone to detect 12555 * eDP ports. Consult the VBT as well as DP_DETECTED to 12556 * detect eDP ports. 
12557 */ 12558 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED && 12559 !intel_dp_is_edp(dev, PORT_B)) 12560 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 12561 PORT_B); 12562 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED || 12563 intel_dp_is_edp(dev, PORT_B)) 12564 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 12565 12566 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED && 12567 !intel_dp_is_edp(dev, PORT_C)) 12568 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 12569 PORT_C); 12570 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED || 12571 intel_dp_is_edp(dev, PORT_C)) 12572 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 12573 12574 if (IS_CHERRYVIEW(dev)) { 12575 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) 12576 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, 12577 PORT_D); 12578 /* eDP not supported on port D, so don't check VBT */ 12579 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 12580 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 12581 } 12582 12583 intel_dsi_init(dev); 12584 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { 12585 bool found = false; 12586 12587 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 12588 DRM_DEBUG_KMS("probing SDVOB\n"); 12589 found = intel_sdvo_init(dev, GEN3_SDVOB, true); 12590 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 12591 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 12592 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 12593 } 12594 12595 if (!found && SUPPORTS_INTEGRATED_DP(dev)) 12596 intel_dp_init(dev, DP_B, PORT_B); 12597 } 12598 12599 /* Before G4X SDVOC doesn't have its own detect register */ 12600 12601 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 12602 DRM_DEBUG_KMS("probing SDVOC\n"); 12603 found = intel_sdvo_init(dev, GEN3_SDVOC, false); 12604 } 12605 12606 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 12607 12608 if (SUPPORTS_INTEGRATED_HDMI(dev)) { 12609 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 12610 intel_hdmi_init(dev, 
GEN4_HDMIC, PORT_C); 12611 } 12612 if (SUPPORTS_INTEGRATED_DP(dev)) 12613 intel_dp_init(dev, DP_C, PORT_C); 12614 } 12615 12616 if (SUPPORTS_INTEGRATED_DP(dev) && 12617 (I915_READ(DP_D) & DP_DETECTED)) 12618 intel_dp_init(dev, DP_D, PORT_D); 12619 } else if (IS_GEN2(dev)) 12620 intel_dvo_init(dev); 12621 12622 if (SUPPORTS_TV(dev)) 12623 intel_tv_init(dev); 12624 12625 /* 12626 * FIXME: We don't have full atomic support yet, but we want to be 12627 * able to enable/test plane updates via the atomic interface in the 12628 * meantime. However as soon as we flip DRIVER_ATOMIC on, the DRM core 12629 * will take some atomic codepaths to lookup properties during 12630 * drmModeGetConnector() that unconditionally dereference 12631 * connector->state. 12632 * 12633 * We create a dummy connector state here for each connector to ensure 12634 * the DRM core doesn't try to dereference a NULL connector->state. 12635 * The actual connector properties will never be updated or contain 12636 * useful information, but since we're doing this specifically for 12637 * testing/debug of the plane operations (and only when a specific 12638 * kernel module option is given), that shouldn't really matter. 12639 * 12640 * Once atomic support for crtc's + connectors lands, this loop should 12641 * be removed since we'll be setting up real connector state, which 12642 * will contain Intel-specific properties. 
12643 */ 12644 if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { 12645 list_for_each_entry(connector, 12646 &dev->mode_config.connector_list, 12647 head) { 12648 if (!WARN_ON(connector->state)) { 12649 connector->state = 12650 kzalloc(sizeof(*connector->state), 12651 GFP_KERNEL); 12652 } 12653 } 12654 } 12655 12656 intel_psr_init(dev); 12657 12658 for_each_intel_encoder(dev, encoder) { 12659 encoder->base.possible_crtcs = encoder->crtc_mask; 12660 encoder->base.possible_clones = 12661 intel_encoder_clones(encoder); 12662 } 12663 12664 intel_init_pch_refclk(dev); 12665 12666 drm_helper_move_panel_connectors_to_head(dev); 12667 } 12668 12669 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 12670 { 12671 struct drm_device *dev = fb->dev; 12672 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 12673 12674 drm_framebuffer_cleanup(fb); 12675 mutex_lock(&dev->struct_mutex); 12676 WARN_ON(!intel_fb->obj->framebuffer_references--); 12677 drm_gem_object_unreference(&intel_fb->obj->base); 12678 mutex_unlock(&dev->struct_mutex); 12679 kfree(intel_fb); 12680 } 12681 12682 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 12683 struct drm_file *file, 12684 unsigned int *handle) 12685 { 12686 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 12687 struct drm_i915_gem_object *obj = intel_fb->obj; 12688 12689 return drm_gem_handle_create(file, &obj->base, handle); 12690 } 12691 12692 static const struct drm_framebuffer_funcs intel_fb_funcs = { 12693 .destroy = intel_user_framebuffer_destroy, 12694 .create_handle = intel_user_framebuffer_create_handle, 12695 }; 12696 12697 static int intel_framebuffer_init(struct drm_device *dev, 12698 struct intel_framebuffer *intel_fb, 12699 struct drm_mode_fb_cmd2 *mode_cmd, 12700 struct drm_i915_gem_object *obj) 12701 { 12702 int aligned_height; 12703 int pitch_limit; 12704 int ret; 12705 12706 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 12707 12708 if (obj->tiling_mode 
== I915_TILING_Y) { 12709 DRM_DEBUG("hardware does not support tiling Y\n"); 12710 return -EINVAL; 12711 } 12712 12713 if (mode_cmd->pitches[0] & 63) { 12714 DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n", 12715 mode_cmd->pitches[0]); 12716 return -EINVAL; 12717 } 12718 12719 if (INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev)) { 12720 pitch_limit = 32*1024; 12721 } else if (INTEL_INFO(dev)->gen >= 4) { 12722 if (obj->tiling_mode) 12723 pitch_limit = 16*1024; 12724 else 12725 pitch_limit = 32*1024; 12726 } else if (INTEL_INFO(dev)->gen >= 3) { 12727 if (obj->tiling_mode) 12728 pitch_limit = 8*1024; 12729 else 12730 pitch_limit = 16*1024; 12731 } else 12732 /* XXX DSPC is limited to 4k tiled */ 12733 pitch_limit = 8*1024; 12734 12735 if (mode_cmd->pitches[0] > pitch_limit) { 12736 DRM_DEBUG("%s pitch (%d) must be at less than %d\n", 12737 obj->tiling_mode ? "tiled" : "linear", 12738 mode_cmd->pitches[0], pitch_limit); 12739 return -EINVAL; 12740 } 12741 12742 if (obj->tiling_mode != I915_TILING_NONE && 12743 mode_cmd->pitches[0] != obj->stride) { 12744 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 12745 mode_cmd->pitches[0], obj->stride); 12746 return -EINVAL; 12747 } 12748 12749 /* Reject formats not supported by any plane early. 
*/ 12750 switch (mode_cmd->pixel_format) { 12751 case DRM_FORMAT_C8: 12752 case DRM_FORMAT_RGB565: 12753 case DRM_FORMAT_XRGB8888: 12754 case DRM_FORMAT_ARGB8888: 12755 break; 12756 case DRM_FORMAT_XRGB1555: 12757 case DRM_FORMAT_ARGB1555: 12758 if (INTEL_INFO(dev)->gen > 3) { 12759 DRM_DEBUG("unsupported pixel format: %s\n", 12760 drm_get_format_name(mode_cmd->pixel_format)); 12761 return -EINVAL; 12762 } 12763 break; 12764 case DRM_FORMAT_XBGR8888: 12765 case DRM_FORMAT_ABGR8888: 12766 case DRM_FORMAT_XRGB2101010: 12767 case DRM_FORMAT_ARGB2101010: 12768 case DRM_FORMAT_XBGR2101010: 12769 case DRM_FORMAT_ABGR2101010: 12770 if (INTEL_INFO(dev)->gen < 4) { 12771 DRM_DEBUG("unsupported pixel format: %s\n", 12772 drm_get_format_name(mode_cmd->pixel_format)); 12773 return -EINVAL; 12774 } 12775 break; 12776 case DRM_FORMAT_YUYV: 12777 case DRM_FORMAT_UYVY: 12778 case DRM_FORMAT_YVYU: 12779 case DRM_FORMAT_VYUY: 12780 if (INTEL_INFO(dev)->gen < 5) { 12781 DRM_DEBUG("unsupported pixel format: %s\n", 12782 drm_get_format_name(mode_cmd->pixel_format)); 12783 return -EINVAL; 12784 } 12785 break; 12786 default: 12787 DRM_DEBUG("unsupported pixel format: %s\n", 12788 drm_get_format_name(mode_cmd->pixel_format)); 12789 return -EINVAL; 12790 } 12791 12792 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 12793 if (mode_cmd->offsets[0] != 0) 12794 return -EINVAL; 12795 12796 aligned_height = intel_fb_align_height(dev, mode_cmd->height, 12797 obj->tiling_mode); 12798 /* FIXME drm helper for size checks (especially planar formats)? 
*/ 12799 if (obj->base.size < aligned_height * mode_cmd->pitches[0]) 12800 return -EINVAL; 12801 12802 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); 12803 intel_fb->obj = obj; 12804 intel_fb->obj->framebuffer_references++; 12805 12806 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); 12807 if (ret) { 12808 DRM_ERROR("framebuffer init failed %d\n", ret); 12809 return ret; 12810 } 12811 12812 return 0; 12813 } 12814 12815 static struct drm_framebuffer * 12816 intel_user_framebuffer_create(struct drm_device *dev, 12817 struct drm_file *filp, 12818 struct drm_mode_fb_cmd2 *mode_cmd) 12819 { 12820 struct drm_i915_gem_object *obj; 12821 12822 obj = to_intel_bo(drm_gem_object_lookup(dev, filp, 12823 mode_cmd->handles[0])); 12824 if (&obj->base == NULL) 12825 return ERR_PTR(-ENOENT); 12826 12827 return intel_framebuffer_create(dev, mode_cmd, obj); 12828 } 12829 12830 #ifndef CONFIG_DRM_I915_FBDEV 12831 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev) 12832 { 12833 } 12834 #endif 12835 12836 static const struct drm_mode_config_funcs intel_mode_funcs = { 12837 .fb_create = intel_user_framebuffer_create, 12838 .output_poll_changed = intel_fbdev_output_poll_changed, 12839 .atomic_check = intel_atomic_check, 12840 .atomic_commit = intel_atomic_commit, 12841 }; 12842 12843 /* Set up chip specific display functions */ 12844 static void intel_init_display(struct drm_device *dev) 12845 { 12846 struct drm_i915_private *dev_priv = dev->dev_private; 12847 12848 if (HAS_PCH_SPLIT(dev) || IS_G4X(dev)) 12849 dev_priv->display.find_dpll = g4x_find_best_dpll; 12850 else if (IS_CHERRYVIEW(dev)) 12851 dev_priv->display.find_dpll = chv_find_best_dpll; 12852 else if (IS_VALLEYVIEW(dev)) 12853 dev_priv->display.find_dpll = vlv_find_best_dpll; 12854 else if (IS_PINEVIEW(dev)) 12855 dev_priv->display.find_dpll = pnv_find_best_dpll; 12856 else 12857 dev_priv->display.find_dpll = i9xx_find_best_dpll; 12858 12859 if (INTEL_INFO(dev)->gen >= 9) 
{ 12860 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 12861 dev_priv->display.get_initial_plane_config = 12862 skylake_get_initial_plane_config; 12863 dev_priv->display.crtc_compute_clock = 12864 haswell_crtc_compute_clock; 12865 dev_priv->display.crtc_enable = haswell_crtc_enable; 12866 dev_priv->display.crtc_disable = haswell_crtc_disable; 12867 dev_priv->display.off = ironlake_crtc_off; 12868 dev_priv->display.update_primary_plane = 12869 skylake_update_primary_plane; 12870 } else if (HAS_DDI(dev)) { 12871 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 12872 dev_priv->display.get_initial_plane_config = 12873 ironlake_get_initial_plane_config; 12874 dev_priv->display.crtc_compute_clock = 12875 haswell_crtc_compute_clock; 12876 dev_priv->display.crtc_enable = haswell_crtc_enable; 12877 dev_priv->display.crtc_disable = haswell_crtc_disable; 12878 dev_priv->display.off = ironlake_crtc_off; 12879 dev_priv->display.update_primary_plane = 12880 ironlake_update_primary_plane; 12881 } else if (HAS_PCH_SPLIT(dev)) { 12882 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 12883 dev_priv->display.get_initial_plane_config = 12884 ironlake_get_initial_plane_config; 12885 dev_priv->display.crtc_compute_clock = 12886 ironlake_crtc_compute_clock; 12887 dev_priv->display.crtc_enable = ironlake_crtc_enable; 12888 dev_priv->display.crtc_disable = ironlake_crtc_disable; 12889 dev_priv->display.off = ironlake_crtc_off; 12890 dev_priv->display.update_primary_plane = 12891 ironlake_update_primary_plane; 12892 } else if (IS_VALLEYVIEW(dev)) { 12893 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 12894 dev_priv->display.get_initial_plane_config = 12895 i9xx_get_initial_plane_config; 12896 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 12897 dev_priv->display.crtc_enable = valleyview_crtc_enable; 12898 dev_priv->display.crtc_disable = i9xx_crtc_disable; 12899 dev_priv->display.off = i9xx_crtc_off; 12900 
dev_priv->display.update_primary_plane = 12901 i9xx_update_primary_plane; 12902 } else { 12903 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 12904 dev_priv->display.get_initial_plane_config = 12905 i9xx_get_initial_plane_config; 12906 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 12907 dev_priv->display.crtc_enable = i9xx_crtc_enable; 12908 dev_priv->display.crtc_disable = i9xx_crtc_disable; 12909 dev_priv->display.off = i9xx_crtc_off; 12910 dev_priv->display.update_primary_plane = 12911 i9xx_update_primary_plane; 12912 } 12913 12914 /* Returns the core display clock speed */ 12915 if (IS_VALLEYVIEW(dev)) 12916 dev_priv->display.get_display_clock_speed = 12917 valleyview_get_display_clock_speed; 12918 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 12919 dev_priv->display.get_display_clock_speed = 12920 i945_get_display_clock_speed; 12921 else if (IS_I915G(dev)) 12922 dev_priv->display.get_display_clock_speed = 12923 i915_get_display_clock_speed; 12924 else if (IS_I945GM(dev) || IS_845G(dev)) 12925 dev_priv->display.get_display_clock_speed = 12926 i9xx_misc_get_display_clock_speed; 12927 else if (IS_PINEVIEW(dev)) 12928 dev_priv->display.get_display_clock_speed = 12929 pnv_get_display_clock_speed; 12930 else if (IS_I915GM(dev)) 12931 dev_priv->display.get_display_clock_speed = 12932 i915gm_get_display_clock_speed; 12933 else if (IS_I865G(dev)) 12934 dev_priv->display.get_display_clock_speed = 12935 i865_get_display_clock_speed; 12936 else if (IS_I85X(dev)) 12937 dev_priv->display.get_display_clock_speed = 12938 i855_get_display_clock_speed; 12939 else /* 852, 830 */ 12940 dev_priv->display.get_display_clock_speed = 12941 i830_get_display_clock_speed; 12942 12943 if (IS_GEN5(dev)) { 12944 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 12945 } else if (IS_GEN6(dev)) { 12946 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 12947 } else if (IS_IVYBRIDGE(dev)) { 12948 /* FIXME: detect B0+ stepping and use 
auto training */ 12949 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 12950 dev_priv->display.modeset_global_resources = 12951 ivb_modeset_global_resources; 12952 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 12953 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 12954 } else if (IS_VALLEYVIEW(dev)) { 12955 dev_priv->display.modeset_global_resources = 12956 valleyview_modeset_global_resources; 12957 } 12958 12959 /* Default just returns -ENODEV to indicate unsupported */ 12960 dev_priv->display.queue_flip = intel_default_queue_flip; 12961 12962 switch (INTEL_INFO(dev)->gen) { 12963 case 2: 12964 dev_priv->display.queue_flip = intel_gen2_queue_flip; 12965 break; 12966 12967 case 3: 12968 dev_priv->display.queue_flip = intel_gen3_queue_flip; 12969 break; 12970 12971 case 4: 12972 case 5: 12973 dev_priv->display.queue_flip = intel_gen4_queue_flip; 12974 break; 12975 12976 case 6: 12977 dev_priv->display.queue_flip = intel_gen6_queue_flip; 12978 break; 12979 case 7: 12980 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 12981 dev_priv->display.queue_flip = intel_gen7_queue_flip; 12982 break; 12983 case 9: 12984 dev_priv->display.queue_flip = intel_gen9_queue_flip; 12985 break; 12986 } 12987 12988 intel_panel_init_backlight_funcs(dev); 12989 12990 lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE); 12991 } 12992 12993 /* 12994 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 12995 * resume, or other times. This quirk makes sure that's the case for 12996 * affected systems. 
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* PCI-id matched quirk entry; PCI_ANY_ID wildcards the subsystem fields. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

/* DMI callback: log the match; returning 1 counts it as a hit. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* HP Mini needs pipe A force quirk (LP: #322104) */
	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },

	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
};

/* Apply every PCI-id and DMI quirk that matches this device. */
static void intel_init_quirks(struct drm_device *dev)
{
	struct device *d = dev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set SR01 bit 5 (screen off) via the legacy VGA sequencer ports. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	/*
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
	 * from S3 without preserving (some of?) the other bits.
	 */
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/* Hardware-side init: DDI buffers, CDCLK (vlv), clock gating, GT power. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_update_cdclk(dev);

	intel_init_clock_gating(dev);

	intel_enable_gt_powersave(dev);
}

/*
 * Main modeset init: mode_config limits, quirks, per-pipe CRTCs and
 * sprites, output probing, and hw state readout/sanitization.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* No pipes means no display hardware; nothing more to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);
	intel_init_audio(dev);

	/* Maximum framebuffer dimensions by generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_init_dpio(dev);

	intel_shared_dpll_init(dev);

	/* save the BIOS value before clobbering it */
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_fbc_disable(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, false);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		if (dev_priv->display.get_initial_plane_config) {
			dev_priv->display.get_initial_plane_config(crtc,
								   &crtc->plane_config);
			/*
			 * If the fb is shared between multiple heads, we'll
			 * just get the first one.
			 */
			intel_find_plane_obj(crtc, &crtc->plane_config);
		}
	}
}

/* Force pipe A on via a one-shot load-detect cycle on the CRT encoder. */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	list_for_each_entry(connector,
			    &dev->mode_config.connector_list,
			    base.head) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp);
}

/*
 * Check whether the other display plane is already driving this CRTC's
 * pipe (gen2/3 have a configurable plane->pipe mapping); false means the
 * mapping is wrong and needs sanitizing.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

/*
 * Bring one CRTC's software state in line with what the hardware (and the
 * BIOS before us) actually left enabled.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config->cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	if (crtc->active) {
		update_scanline_offset(crtc);
		drm_vblank_on(dev, crtc->pipe);
	} else
		drm_vblank_off(dev, crtc->pipe);

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->plane = !plane;
		crtc->primary_enabled = true;
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		/* multiple connectors may have the same encoder:
		 * handle them and break crtc link separately */
		list_for_each_entry(connector, &dev->mode_config.connector_list,
				    base.head)
			if (connector->encoder->base.crtc == &crtc->base) {
				connector->encoder->base.crtc = NULL;
				connector->encoder->connectors_active = false;
			}

		WARN_ON(crtc->active);
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.enabled) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.enabled ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only established when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
*/ 13483 if (encoder->base.crtc) { 13484 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 13485 encoder->base.base.id, 13486 encoder->base.name); 13487 encoder->disable(encoder); 13488 if (encoder->post_disable) 13489 encoder->post_disable(encoder); 13490 } 13491 encoder->base.crtc = NULL; 13492 encoder->connectors_active = false; 13493 13494 /* Inconsistent output/port/pipe state happens presumably due to 13495 * a bug in one of the get_hw_state functions. Or someplace else 13496 * in our code, like the register restore mess on resume. Clamp 13497 * things to off as a safer default. */ 13498 list_for_each_entry(connector, 13499 &dev->mode_config.connector_list, 13500 base.head) { 13501 if (connector->encoder != encoder) 13502 continue; 13503 connector->base.dpms = DRM_MODE_DPMS_OFF; 13504 connector->base.encoder = NULL; 13505 } 13506 } 13507 /* Enabled encoders without active connectors will be fixed in 13508 * the crtc fixup. */ 13509 } 13510 13511 void i915_redisable_vga_power_on(struct drm_device *dev) 13512 { 13513 struct drm_i915_private *dev_priv = dev->dev_private; 13514 u32 vga_reg = i915_vgacntrl_reg(dev); 13515 13516 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 13517 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 13518 i915_disable_vga(dev); 13519 } 13520 } 13521 13522 void i915_redisable_vga(struct drm_device *dev) 13523 { 13524 struct drm_i915_private *dev_priv = dev->dev_private; 13525 13526 /* This function can be called both from intel_modeset_setup_hw_state or 13527 * at a very early point in our resume sequence, where the power well 13528 * structures are not yet restored. Since this function is at a very 13529 * paranoid "someone might have enabled VGA while we were not looking" 13530 * level, just check if the power well is enabled instead of trying to 13531 * follow the "don't touch the power well if we don't need it" policy 13532 * the rest of the driver uses. 
	 */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}

/*
 * Read back whether this crtc's primary plane is actually enabled in
 * hardware.  An inactive crtc is reported as plane-off without touching
 * registers.
 */
static bool primary_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (!crtc->active)
		return false;

	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
}

/*
 * Read the current hardware modeset state (crtcs, shared DPLLs, encoders,
 * connectors, in that order) into the software tracking structures.  This
 * only records what the hardware says; sanitizing the result is done
 * separately by intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		memset(crtc->config, 0, sizeof(*crtc->config));

		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.enabled = crtc->active;
		crtc->primary_enabled = primary_get_hw_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		/* Rebuild the refcount/crtc mask from the active crtcs that
		 * reference this pll. */
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		/* connectors_active is recomputed in the connector loop
		 * below. */
		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    base.head) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}
}

/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active && i915.fastboot) {
			intel_mode_from_pipe_config(&crtc->base.mode,
						    crtc->config);
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess.
	 */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	/* Switch off any shared DPLL the readout found enabled but unused. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(dev_priv, pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
				       crtc->primary->fb);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}

/*
 * Late (post-GEM) modeset init: take over BIOS SSC configuration, run HW
 * init, set up the overlay, and pin & fence the boot framebuffers that were
 * allocated before GEM was ready.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
						DREF_SSC1_ENABLE);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	mutex_lock(&dev->struct_mutex);
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		if (intel_pin_and_fence_fb_obj(c->primary,
					       c->primary->fb,
					       NULL)) {
			/* Pinning failed: drop the fb rather than scan out
			 * an unpinned buffer. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			update_state_fb(c->primary);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	intel_backlight_register(dev);
}

/*
 * Tear down one connector's user-visible interfaces (backlight, sysfs)
 * before the connector itself is destroyed.
 */
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

/*
 * Driver unload: shut down interrupts/polling first, then delayed work,
 * per-connector interfaces, the mode config, the overlay and finally GT
 * powersave.  The ordering here is deliberate — see the comments below.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev);

	ironlake_teardown_rc6(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

/* Link an intel connector to its encoder, on both the intel and drm level. */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* GMCH control moved to a different config word on SNB+. */
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ?
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Already in the requested state — nothing to write. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if 0
/* Snapshot of display registers captured at GPU-hang time for the error
 * state file.  Compiled out in this port. */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/*
 * Capture the current display register state into a freshly allocated
 * intel_display_error_state.  Uses GFP_ATOMIC since it runs from error
 * handling context; skips pipes/transcoders whose power domain is off so we
 * don't read garbage.  Returns NULL if there are no pipes or allocation
 * fails; caller owns (and frees) the returned snapshot.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured display error state into the error
 * state buffer.  A NULL error is a no-op.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif

/*
 * File-close hook: drop any pending page-flip completion events that belong
 * to the closing file so they are never delivered to a dead file descriptor.
 * The unpin work itself is left to complete normally.
 */
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_unpin_work *work;

		/* event_lock guards crtc->unpin_work against the flip irq. */
		lockmgr(&dev->event_lock, LK_EXCLUSIVE);

		work = crtc->unpin_work;

		if (work && work->event &&
		    work->event->base.file_priv == file) {
			kfree(work->event);
			work->event = NULL;
		}

		lockmgr(&dev->event_lock, LK_RELEASE);
	}
}