/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_crtc_helper.h> 41 #include <drm/drm_plane_helper.h> 42 #include <drm/drm_rect.h> 43 44 /* Primary plane formats for gen <= 3 */ 45 static const uint32_t i8xx_primary_formats[] = { 46 DRM_FORMAT_C8, 47 DRM_FORMAT_RGB565, 48 DRM_FORMAT_XRGB1555, 49 DRM_FORMAT_XRGB8888, 50 }; 51 52 /* Primary plane formats for gen >= 4 */ 53 static const uint32_t i965_primary_formats[] = { 54 DRM_FORMAT_C8, 55 DRM_FORMAT_RGB565, 56 DRM_FORMAT_XRGB8888, 57 DRM_FORMAT_XBGR8888, 58 DRM_FORMAT_XRGB2101010, 59 DRM_FORMAT_XBGR2101010, 60 }; 61 62 static const uint32_t skl_primary_formats[] = { 63 DRM_FORMAT_C8, 64 DRM_FORMAT_RGB565, 65 DRM_FORMAT_XRGB8888, 66 DRM_FORMAT_XBGR8888, 67 DRM_FORMAT_ARGB8888, 68 DRM_FORMAT_ABGR8888, 69 DRM_FORMAT_XRGB2101010, 70 DRM_FORMAT_XBGR2101010, 71 DRM_FORMAT_YUYV, 72 DRM_FORMAT_YVYU, 73 DRM_FORMAT_UYVY, 74 DRM_FORMAT_VYUY, 75 }; 76 77 /* Cursor formats */ 78 static const uint32_t intel_cursor_formats[] = { 79 DRM_FORMAT_ARGB8888, 80 }; 81 82 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 83 84 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 85 struct intel_crtc_state *pipe_config); 86 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 87 struct intel_crtc_state *pipe_config); 88 89 static int intel_framebuffer_init(struct drm_device *dev, 90 struct intel_framebuffer *ifb, 91 struct drm_mode_fb_cmd2 *mode_cmd, 92 struct drm_i915_gem_object *obj); 93 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 94 static void intel_set_pipe_timings(struct intel_crtc 
*intel_crtc); 95 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 96 struct intel_link_m_n *m_n, 97 struct intel_link_m_n *m2_n2); 98 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 99 static void haswell_set_pipeconf(struct drm_crtc *crtc); 100 static void intel_set_pipe_csc(struct drm_crtc *crtc); 101 static void vlv_prepare_pll(struct intel_crtc *crtc, 102 const struct intel_crtc_state *pipe_config); 103 static void chv_prepare_pll(struct intel_crtc *crtc, 104 const struct intel_crtc_state *pipe_config); 105 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 106 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 107 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 108 struct intel_crtc_state *crtc_state); 109 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 110 int num_connectors); 111 static void skylake_pfit_enable(struct intel_crtc *crtc); 112 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 113 static void ironlake_pfit_enable(struct intel_crtc *crtc); 114 static void intel_modeset_setup_hw_state(struct drm_device *dev); 115 static void intel_pre_disable_primary(struct drm_crtc *crtc); 116 117 typedef struct { 118 int min, max; 119 } intel_range_t; 120 121 typedef struct { 122 int dot_limit; 123 int p2_slow, p2_fast; 124 } intel_p2_t; 125 126 typedef struct intel_limit intel_limit_t; 127 struct intel_limit { 128 intel_range_t dot, vco, n, m, m1, m2, p, p1; 129 intel_p2_t p2; 130 }; 131 132 /* returns HPLL frequency in kHz */ 133 static int valleyview_get_vco(struct drm_i915_private *dev_priv) 134 { 135 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 136 137 /* Obtain SKU information */ 138 mutex_lock(&dev_priv->sb_lock); 139 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 140 CCK_FUSE_HPLL_FREQ_MASK; 141 mutex_unlock(&dev_priv->sb_lock); 142 143 return vco_freq[hpll_freq] * 1000; 144 
} 145 146 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 147 const char *name, u32 reg) 148 { 149 u32 val; 150 int divider; 151 152 if (dev_priv->hpll_freq == 0) 153 dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 154 155 mutex_lock(&dev_priv->sb_lock); 156 val = vlv_cck_read(dev_priv, reg); 157 mutex_unlock(&dev_priv->sb_lock); 158 159 divider = val & CCK_FREQUENCY_VALUES; 160 161 WARN((val & CCK_FREQUENCY_STATUS) != 162 (divider << CCK_FREQUENCY_STATUS_SHIFT), 163 "%s change in progress\n", name); 164 165 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 166 } 167 168 int 169 intel_pch_rawclk(struct drm_device *dev) 170 { 171 struct drm_i915_private *dev_priv = dev->dev_private; 172 173 WARN_ON(!HAS_PCH_SPLIT(dev)); 174 175 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 176 } 177 178 /* hrawclock is 1/4 the FSB frequency */ 179 int intel_hrawclk(struct drm_device *dev) 180 { 181 struct drm_i915_private *dev_priv = dev->dev_private; 182 uint32_t clkcfg; 183 184 /* There is no CLKCFG reg in Valleyview. 
VLV hrawclk is 200 MHz */ 185 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 186 return 200; 187 188 clkcfg = I915_READ(CLKCFG); 189 switch (clkcfg & CLKCFG_FSB_MASK) { 190 case CLKCFG_FSB_400: 191 return 100; 192 case CLKCFG_FSB_533: 193 return 133; 194 case CLKCFG_FSB_667: 195 return 166; 196 case CLKCFG_FSB_800: 197 return 200; 198 case CLKCFG_FSB_1067: 199 return 266; 200 case CLKCFG_FSB_1333: 201 return 333; 202 /* these two are just a guess; one of them might be right */ 203 case CLKCFG_FSB_1600: 204 case CLKCFG_FSB_1600_ALT: 205 return 400; 206 default: 207 return 133; 208 } 209 } 210 211 static void intel_update_czclk(struct drm_i915_private *dev_priv) 212 { 213 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 214 return; 215 216 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 217 CCK_CZ_CLOCK_CONTROL); 218 219 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 220 } 221 222 static inline u32 /* units of 100MHz */ 223 intel_fdi_link_freq(struct drm_device *dev) 224 { 225 if (IS_GEN5(dev)) { 226 struct drm_i915_private *dev_priv = dev->dev_private; 227 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 228 } else 229 return 27; 230 } 231 232 static const intel_limit_t intel_limits_i8xx_dac = { 233 .dot = { .min = 25000, .max = 350000 }, 234 .vco = { .min = 908000, .max = 1512000 }, 235 .n = { .min = 2, .max = 16 }, 236 .m = { .min = 96, .max = 140 }, 237 .m1 = { .min = 18, .max = 26 }, 238 .m2 = { .min = 6, .max = 16 }, 239 .p = { .min = 4, .max = 128 }, 240 .p1 = { .min = 2, .max = 33 }, 241 .p2 = { .dot_limit = 165000, 242 .p2_slow = 4, .p2_fast = 2 }, 243 }; 244 245 static const intel_limit_t intel_limits_i8xx_dvo = { 246 .dot = { .min = 25000, .max = 350000 }, 247 .vco = { .min = 908000, .max = 1512000 }, 248 .n = { .min = 2, .max = 16 }, 249 .m = { .min = 96, .max = 140 }, 250 .m1 = { .min = 18, .max = 26 }, 251 .m2 = { .min = 6, .max = 16 }, 252 .p = { .min = 4, .max = 128 }, 253 .p1 = { .min = 2, 
.max = 33 }, 254 .p2 = { .dot_limit = 165000, 255 .p2_slow = 4, .p2_fast = 4 }, 256 }; 257 258 static const intel_limit_t intel_limits_i8xx_lvds = { 259 .dot = { .min = 25000, .max = 350000 }, 260 .vco = { .min = 908000, .max = 1512000 }, 261 .n = { .min = 2, .max = 16 }, 262 .m = { .min = 96, .max = 140 }, 263 .m1 = { .min = 18, .max = 26 }, 264 .m2 = { .min = 6, .max = 16 }, 265 .p = { .min = 4, .max = 128 }, 266 .p1 = { .min = 1, .max = 6 }, 267 .p2 = { .dot_limit = 165000, 268 .p2_slow = 14, .p2_fast = 7 }, 269 }; 270 271 static const intel_limit_t intel_limits_i9xx_sdvo = { 272 .dot = { .min = 20000, .max = 400000 }, 273 .vco = { .min = 1400000, .max = 2800000 }, 274 .n = { .min = 1, .max = 6 }, 275 .m = { .min = 70, .max = 120 }, 276 .m1 = { .min = 8, .max = 18 }, 277 .m2 = { .min = 3, .max = 7 }, 278 .p = { .min = 5, .max = 80 }, 279 .p1 = { .min = 1, .max = 8 }, 280 .p2 = { .dot_limit = 200000, 281 .p2_slow = 10, .p2_fast = 5 }, 282 }; 283 284 static const intel_limit_t intel_limits_i9xx_lvds = { 285 .dot = { .min = 20000, .max = 400000 }, 286 .vco = { .min = 1400000, .max = 2800000 }, 287 .n = { .min = 1, .max = 6 }, 288 .m = { .min = 70, .max = 120 }, 289 .m1 = { .min = 8, .max = 18 }, 290 .m2 = { .min = 3, .max = 7 }, 291 .p = { .min = 7, .max = 98 }, 292 .p1 = { .min = 1, .max = 8 }, 293 .p2 = { .dot_limit = 112000, 294 .p2_slow = 14, .p2_fast = 7 }, 295 }; 296 297 298 static const intel_limit_t intel_limits_g4x_sdvo = { 299 .dot = { .min = 25000, .max = 270000 }, 300 .vco = { .min = 1750000, .max = 3500000}, 301 .n = { .min = 1, .max = 4 }, 302 .m = { .min = 104, .max = 138 }, 303 .m1 = { .min = 17, .max = 23 }, 304 .m2 = { .min = 5, .max = 11 }, 305 .p = { .min = 10, .max = 30 }, 306 .p1 = { .min = 1, .max = 3}, 307 .p2 = { .dot_limit = 270000, 308 .p2_slow = 10, 309 .p2_fast = 10 310 }, 311 }; 312 313 static const intel_limit_t intel_limits_g4x_hdmi = { 314 .dot = { .min = 22000, .max = 400000 }, 315 .vco = { .min = 1750000, .max = 3500000}, 316 .n = 
{ .min = 1, .max = 4 }, 317 .m = { .min = 104, .max = 138 }, 318 .m1 = { .min = 16, .max = 23 }, 319 .m2 = { .min = 5, .max = 11 }, 320 .p = { .min = 5, .max = 80 }, 321 .p1 = { .min = 1, .max = 8}, 322 .p2 = { .dot_limit = 165000, 323 .p2_slow = 10, .p2_fast = 5 }, 324 }; 325 326 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 327 .dot = { .min = 20000, .max = 115000 }, 328 .vco = { .min = 1750000, .max = 3500000 }, 329 .n = { .min = 1, .max = 3 }, 330 .m = { .min = 104, .max = 138 }, 331 .m1 = { .min = 17, .max = 23 }, 332 .m2 = { .min = 5, .max = 11 }, 333 .p = { .min = 28, .max = 112 }, 334 .p1 = { .min = 2, .max = 8 }, 335 .p2 = { .dot_limit = 0, 336 .p2_slow = 14, .p2_fast = 14 337 }, 338 }; 339 340 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 341 .dot = { .min = 80000, .max = 224000 }, 342 .vco = { .min = 1750000, .max = 3500000 }, 343 .n = { .min = 1, .max = 3 }, 344 .m = { .min = 104, .max = 138 }, 345 .m1 = { .min = 17, .max = 23 }, 346 .m2 = { .min = 5, .max = 11 }, 347 .p = { .min = 14, .max = 42 }, 348 .p1 = { .min = 2, .max = 6 }, 349 .p2 = { .dot_limit = 0, 350 .p2_slow = 7, .p2_fast = 7 351 }, 352 }; 353 354 static const intel_limit_t intel_limits_pineview_sdvo = { 355 .dot = { .min = 20000, .max = 400000}, 356 .vco = { .min = 1700000, .max = 3500000 }, 357 /* Pineview's Ncounter is a ring counter */ 358 .n = { .min = 3, .max = 6 }, 359 .m = { .min = 2, .max = 256 }, 360 /* Pineview only has one combined m divider, which we treat as m2. 
*/ 361 .m1 = { .min = 0, .max = 0 }, 362 .m2 = { .min = 0, .max = 254 }, 363 .p = { .min = 5, .max = 80 }, 364 .p1 = { .min = 1, .max = 8 }, 365 .p2 = { .dot_limit = 200000, 366 .p2_slow = 10, .p2_fast = 5 }, 367 }; 368 369 static const intel_limit_t intel_limits_pineview_lvds = { 370 .dot = { .min = 20000, .max = 400000 }, 371 .vco = { .min = 1700000, .max = 3500000 }, 372 .n = { .min = 3, .max = 6 }, 373 .m = { .min = 2, .max = 256 }, 374 .m1 = { .min = 0, .max = 0 }, 375 .m2 = { .min = 0, .max = 254 }, 376 .p = { .min = 7, .max = 112 }, 377 .p1 = { .min = 1, .max = 8 }, 378 .p2 = { .dot_limit = 112000, 379 .p2_slow = 14, .p2_fast = 14 }, 380 }; 381 382 /* Ironlake / Sandybridge 383 * 384 * We calculate clock using (register_value + 2) for N/M1/M2, so here 385 * the range value for them is (actual_value - 2). 386 */ 387 static const intel_limit_t intel_limits_ironlake_dac = { 388 .dot = { .min = 25000, .max = 350000 }, 389 .vco = { .min = 1760000, .max = 3510000 }, 390 .n = { .min = 1, .max = 5 }, 391 .m = { .min = 79, .max = 127 }, 392 .m1 = { .min = 12, .max = 22 }, 393 .m2 = { .min = 5, .max = 9 }, 394 .p = { .min = 5, .max = 80 }, 395 .p1 = { .min = 1, .max = 8 }, 396 .p2 = { .dot_limit = 225000, 397 .p2_slow = 10, .p2_fast = 5 }, 398 }; 399 400 static const intel_limit_t intel_limits_ironlake_single_lvds = { 401 .dot = { .min = 25000, .max = 350000 }, 402 .vco = { .min = 1760000, .max = 3510000 }, 403 .n = { .min = 1, .max = 3 }, 404 .m = { .min = 79, .max = 118 }, 405 .m1 = { .min = 12, .max = 22 }, 406 .m2 = { .min = 5, .max = 9 }, 407 .p = { .min = 28, .max = 112 }, 408 .p1 = { .min = 2, .max = 8 }, 409 .p2 = { .dot_limit = 225000, 410 .p2_slow = 14, .p2_fast = 14 }, 411 }; 412 413 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 414 .dot = { .min = 25000, .max = 350000 }, 415 .vco = { .min = 1760000, .max = 3510000 }, 416 .n = { .min = 1, .max = 3 }, 417 .m = { .min = 79, .max = 127 }, 418 .m1 = { .min = 12, .max = 22 }, 419 .m2 = { .min = 
5, .max = 9 }, 420 .p = { .min = 14, .max = 56 }, 421 .p1 = { .min = 2, .max = 8 }, 422 .p2 = { .dot_limit = 225000, 423 .p2_slow = 7, .p2_fast = 7 }, 424 }; 425 426 /* LVDS 100mhz refclk limits. */ 427 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 428 .dot = { .min = 25000, .max = 350000 }, 429 .vco = { .min = 1760000, .max = 3510000 }, 430 .n = { .min = 1, .max = 2 }, 431 .m = { .min = 79, .max = 126 }, 432 .m1 = { .min = 12, .max = 22 }, 433 .m2 = { .min = 5, .max = 9 }, 434 .p = { .min = 28, .max = 112 }, 435 .p1 = { .min = 2, .max = 8 }, 436 .p2 = { .dot_limit = 225000, 437 .p2_slow = 14, .p2_fast = 14 }, 438 }; 439 440 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 441 .dot = { .min = 25000, .max = 350000 }, 442 .vco = { .min = 1760000, .max = 3510000 }, 443 .n = { .min = 1, .max = 3 }, 444 .m = { .min = 79, .max = 126 }, 445 .m1 = { .min = 12, .max = 22 }, 446 .m2 = { .min = 5, .max = 9 }, 447 .p = { .min = 14, .max = 42 }, 448 .p1 = { .min = 2, .max = 6 }, 449 .p2 = { .dot_limit = 225000, 450 .p2_slow = 7, .p2_fast = 7 }, 451 }; 452 453 static const intel_limit_t intel_limits_vlv = { 454 /* 455 * These are the data rate limits (measured in fast clocks) 456 * since those are the strictest limits we have. The fast 457 * clock and actual rate limits are more relaxed, so checking 458 * them would make no difference. 459 */ 460 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 461 .vco = { .min = 4000000, .max = 6000000 }, 462 .n = { .min = 1, .max = 7 }, 463 .m1 = { .min = 2, .max = 3 }, 464 .m2 = { .min = 11, .max = 156 }, 465 .p1 = { .min = 2, .max = 3 }, 466 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 467 }; 468 469 static const intel_limit_t intel_limits_chv = { 470 /* 471 * These are the data rate limits (measured in fast clocks) 472 * since those are the strictest limits we have. The fast 473 * clock and actual rate limits are more relaxed, so checking 474 * them would make no difference. 
475 */ 476 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 477 .vco = { .min = 4800000, .max = 6480000 }, 478 .n = { .min = 1, .max = 1 }, 479 .m1 = { .min = 2, .max = 2 }, 480 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 481 .p1 = { .min = 2, .max = 4 }, 482 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 483 }; 484 485 static const intel_limit_t intel_limits_bxt = { 486 /* FIXME: find real dot limits */ 487 .dot = { .min = 0, .max = INT_MAX }, 488 .vco = { .min = 4800000, .max = 6700000 }, 489 .n = { .min = 1, .max = 1 }, 490 .m1 = { .min = 2, .max = 2 }, 491 /* FIXME: find real m2 limits */ 492 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 493 .p1 = { .min = 2, .max = 4 }, 494 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 495 }; 496 497 static bool 498 needs_modeset(struct drm_crtc_state *state) 499 { 500 return drm_atomic_crtc_needs_modeset(state); 501 } 502 503 /** 504 * Returns whether any output on the specified pipe is of the specified type 505 */ 506 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) 507 { 508 struct drm_device *dev = crtc->base.dev; 509 struct intel_encoder *encoder; 510 511 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 512 if (encoder->type == type) 513 return true; 514 515 return false; 516 } 517 518 /** 519 * Returns whether any output on the specified pipe will have the specified 520 * type after a staged modeset is complete, i.e., the same as 521 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 522 * encoder->crtc. 
523 */ 524 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, 525 int type) 526 { 527 struct drm_atomic_state *state = crtc_state->base.state; 528 struct drm_connector *connector; 529 struct drm_connector_state *connector_state; 530 struct intel_encoder *encoder; 531 int i, num_connectors = 0; 532 533 for_each_connector_in_state(state, connector, connector_state, i) { 534 if (connector_state->crtc != crtc_state->base.crtc) 535 continue; 536 537 num_connectors++; 538 539 encoder = to_intel_encoder(connector_state->best_encoder); 540 if (encoder->type == type) 541 return true; 542 } 543 544 WARN_ON(num_connectors == 0); 545 546 return false; 547 } 548 549 static const intel_limit_t * 550 intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) 551 { 552 struct drm_device *dev = crtc_state->base.crtc->dev; 553 const intel_limit_t *limit; 554 555 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 556 if (intel_is_dual_link_lvds(dev)) { 557 if (refclk == 100000) 558 limit = &intel_limits_ironlake_dual_lvds_100m; 559 else 560 limit = &intel_limits_ironlake_dual_lvds; 561 } else { 562 if (refclk == 100000) 563 limit = &intel_limits_ironlake_single_lvds_100m; 564 else 565 limit = &intel_limits_ironlake_single_lvds; 566 } 567 } else 568 limit = &intel_limits_ironlake_dac; 569 570 return limit; 571 } 572 573 static const intel_limit_t * 574 intel_g4x_limit(struct intel_crtc_state *crtc_state) 575 { 576 struct drm_device *dev = crtc_state->base.crtc->dev; 577 const intel_limit_t *limit; 578 579 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 580 if (intel_is_dual_link_lvds(dev)) 581 limit = &intel_limits_g4x_dual_channel_lvds; 582 else 583 limit = &intel_limits_g4x_single_channel_lvds; 584 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || 585 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 586 limit = &intel_limits_g4x_hdmi; 587 } else if 
(intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { 588 limit = &intel_limits_g4x_sdvo; 589 } else /* The option is for other outputs */ 590 limit = &intel_limits_i9xx_sdvo; 591 592 return limit; 593 } 594 595 static const intel_limit_t * 596 intel_limit(struct intel_crtc_state *crtc_state, int refclk) 597 { 598 struct drm_device *dev = crtc_state->base.crtc->dev; 599 const intel_limit_t *limit; 600 601 if (IS_BROXTON(dev)) 602 limit = &intel_limits_bxt; 603 else if (HAS_PCH_SPLIT(dev)) 604 limit = intel_ironlake_limit(crtc_state, refclk); 605 else if (IS_G4X(dev)) { 606 limit = intel_g4x_limit(crtc_state); 607 } else if (IS_PINEVIEW(dev)) { 608 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 609 limit = &intel_limits_pineview_lvds; 610 else 611 limit = &intel_limits_pineview_sdvo; 612 } else if (IS_CHERRYVIEW(dev)) { 613 limit = &intel_limits_chv; 614 } else if (IS_VALLEYVIEW(dev)) { 615 limit = &intel_limits_vlv; 616 } else if (!IS_GEN2(dev)) { 617 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 618 limit = &intel_limits_i9xx_lvds; 619 else 620 limit = &intel_limits_i9xx_sdvo; 621 } else { 622 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 623 limit = &intel_limits_i8xx_lvds; 624 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 625 limit = &intel_limits_i8xx_dvo; 626 else 627 limit = &intel_limits_i8xx_dac; 628 } 629 return limit; 630 } 631 632 /* 633 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 634 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 635 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 636 * The helpers' return value is the rate of the clock that is fed to the 637 * display engine's pipe which can be the above fast dot clock rate or a 638 * divided-down version of it. 
639 */ 640 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 641 static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 642 { 643 clock->m = clock->m2 + 2; 644 clock->p = clock->p1 * clock->p2; 645 if (WARN_ON(clock->n == 0 || clock->p == 0)) 646 return 0; 647 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 648 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 649 650 return clock->dot; 651 } 652 653 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 654 { 655 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 656 } 657 658 static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 659 { 660 clock->m = i9xx_dpll_compute_m(clock); 661 clock->p = clock->p1 * clock->p2; 662 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 663 return 0; 664 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 665 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 666 667 return clock->dot; 668 } 669 670 static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 671 { 672 clock->m = clock->m1 * clock->m2; 673 clock->p = clock->p1 * clock->p2; 674 if (WARN_ON(clock->n == 0 || clock->p == 0)) 675 return 0; 676 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 677 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 678 679 return clock->dot / 5; 680 } 681 682 int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 683 { 684 clock->m = clock->m1 * clock->m2; 685 clock->p = clock->p1 * clock->p2; 686 if (WARN_ON(clock->n == 0 || clock->p == 0)) 687 return 0; 688 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 689 clock->n << 22); 690 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 691 692 return clock->dot / 5; 693 } 694 695 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 696 /** 697 * Returns whether the given set of divisors are valid for a given refclk with 698 * the given connectors. 
699 */ 700 701 static bool intel_PLL_is_valid(struct drm_device *dev, 702 const intel_limit_t *limit, 703 const intel_clock_t *clock) 704 { 705 if (clock->n < limit->n.min || limit->n.max < clock->n) 706 INTELPllInvalid("n out of range\n"); 707 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 708 INTELPllInvalid("p1 out of range\n"); 709 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 710 INTELPllInvalid("m2 out of range\n"); 711 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 712 INTELPllInvalid("m1 out of range\n"); 713 714 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && 715 !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) 716 if (clock->m1 <= clock->m2) 717 INTELPllInvalid("m1 <= m2\n"); 718 719 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { 720 if (clock->p < limit->p.min || limit->p.max < clock->p) 721 INTELPllInvalid("p out of range\n"); 722 if (clock->m < limit->m.min || limit->m.max < clock->m) 723 INTELPllInvalid("m out of range\n"); 724 } 725 726 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 727 INTELPllInvalid("vco out of range\n"); 728 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 729 * connector, etc., rather than just a single range. 730 */ 731 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 732 INTELPllInvalid("dot out of range\n"); 733 734 return true; 735 } 736 737 static int 738 i9xx_select_p2_div(const intel_limit_t *limit, 739 const struct intel_crtc_state *crtc_state, 740 int target) 741 { 742 struct drm_device *dev = crtc_state->base.crtc->dev; 743 744 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 745 /* 746 * For LVDS just rely on its current settings for dual-channel. 747 * We haven't figured out how to reliably set up different 748 * single/dual channel state, if we even can. 
749 */ 750 if (intel_is_dual_link_lvds(dev)) 751 return limit->p2.p2_fast; 752 else 753 return limit->p2.p2_slow; 754 } else { 755 if (target < limit->p2.dot_limit) 756 return limit->p2.p2_slow; 757 else 758 return limit->p2.p2_fast; 759 } 760 } 761 762 static bool 763 i9xx_find_best_dpll(const intel_limit_t *limit, 764 struct intel_crtc_state *crtc_state, 765 int target, int refclk, intel_clock_t *match_clock, 766 intel_clock_t *best_clock) 767 { 768 struct drm_device *dev = crtc_state->base.crtc->dev; 769 intel_clock_t clock; 770 int err = target; 771 772 memset(best_clock, 0, sizeof(*best_clock)); 773 774 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 775 776 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 777 clock.m1++) { 778 for (clock.m2 = limit->m2.min; 779 clock.m2 <= limit->m2.max; clock.m2++) { 780 if (clock.m2 >= clock.m1) 781 break; 782 for (clock.n = limit->n.min; 783 clock.n <= limit->n.max; clock.n++) { 784 for (clock.p1 = limit->p1.min; 785 clock.p1 <= limit->p1.max; clock.p1++) { 786 int this_err; 787 788 i9xx_calc_dpll_params(refclk, &clock); 789 if (!intel_PLL_is_valid(dev, limit, 790 &clock)) 791 continue; 792 if (match_clock && 793 clock.p != match_clock->p) 794 continue; 795 796 this_err = abs(clock.dot - target); 797 if (this_err < err) { 798 *best_clock = clock; 799 err = this_err; 800 } 801 } 802 } 803 } 804 } 805 806 return (err != target); 807 } 808 809 static bool 810 pnv_find_best_dpll(const intel_limit_t *limit, 811 struct intel_crtc_state *crtc_state, 812 int target, int refclk, intel_clock_t *match_clock, 813 intel_clock_t *best_clock) 814 { 815 struct drm_device *dev = crtc_state->base.crtc->dev; 816 intel_clock_t clock; 817 int err = target; 818 819 memset(best_clock, 0, sizeof(*best_clock)); 820 821 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 822 823 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 824 clock.m1++) { 825 for (clock.m2 = limit->m2.min; 826 clock.m2 <= limit->m2.max; 
clock.m2++) { 827 for (clock.n = limit->n.min; 828 clock.n <= limit->n.max; clock.n++) { 829 for (clock.p1 = limit->p1.min; 830 clock.p1 <= limit->p1.max; clock.p1++) { 831 int this_err; 832 833 pnv_calc_dpll_params(refclk, &clock); 834 if (!intel_PLL_is_valid(dev, limit, 835 &clock)) 836 continue; 837 if (match_clock && 838 clock.p != match_clock->p) 839 continue; 840 841 this_err = abs(clock.dot - target); 842 if (this_err < err) { 843 *best_clock = clock; 844 err = this_err; 845 } 846 } 847 } 848 } 849 } 850 851 return (err != target); 852 } 853 854 static bool 855 g4x_find_best_dpll(const intel_limit_t *limit, 856 struct intel_crtc_state *crtc_state, 857 int target, int refclk, intel_clock_t *match_clock, 858 intel_clock_t *best_clock) 859 { 860 struct drm_device *dev = crtc_state->base.crtc->dev; 861 intel_clock_t clock; 862 int max_n; 863 bool found = false; 864 /* approximately equals target * 0.00585 */ 865 int err_most = (target >> 8) + (target >> 9); 866 867 memset(best_clock, 0, sizeof(*best_clock)); 868 869 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 870 871 max_n = limit->n.max; 872 /* based on hardware requirement, prefer smaller n to precision */ 873 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 874 /* based on hardware requirement, prefere larger m1,m2 */ 875 for (clock.m1 = limit->m1.max; 876 clock.m1 >= limit->m1.min; clock.m1--) { 877 for (clock.m2 = limit->m2.max; 878 clock.m2 >= limit->m2.min; clock.m2--) { 879 for (clock.p1 = limit->p1.max; 880 clock.p1 >= limit->p1.min; clock.p1--) { 881 int this_err; 882 883 i9xx_calc_dpll_params(refclk, &clock); 884 if (!intel_PLL_is_valid(dev, limit, 885 &clock)) 886 continue; 887 888 this_err = abs(clock.dot - target); 889 if (this_err < err_most) { 890 *best_clock = clock; 891 err_most = this_err; 892 max_n = clock.n; 893 found = true; 894 } 895 } 896 } 897 } 898 } 899 return found; 900 } 901 902 /* 903 * Check if the calculated PLL configuration is more optimal compared to 
the 904 * best configuration and error found so far. Return the calculated error. 905 */ 906 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 907 const intel_clock_t *calculated_clock, 908 const intel_clock_t *best_clock, 909 unsigned int best_error_ppm, 910 unsigned int *error_ppm) 911 { 912 /* 913 * For CHV ignore the error and consider only the P value. 914 * Prefer a bigger P value based on HW requirements. 915 */ 916 if (IS_CHERRYVIEW(dev)) { 917 *error_ppm = 0; 918 919 return calculated_clock->p > best_clock->p; 920 } 921 922 if (WARN_ON_ONCE(!target_freq)) 923 return false; 924 925 *error_ppm = div_u64(1000000ULL * 926 abs(target_freq - calculated_clock->dot), 927 target_freq); 928 /* 929 * Prefer a better P value over a better (smaller) error if the error 930 * is small. Ensure this preference for future configurations too by 931 * setting the error to 0. 932 */ 933 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 934 *error_ppm = 0; 935 936 return true; 937 } 938 939 return *error_ppm + 10 < best_error_ppm; 940 } 941 942 static bool 943 vlv_find_best_dpll(const intel_limit_t *limit, 944 struct intel_crtc_state *crtc_state, 945 int target, int refclk, intel_clock_t *match_clock, 946 intel_clock_t *best_clock) 947 { 948 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 949 struct drm_device *dev = crtc->base.dev; 950 intel_clock_t clock; 951 unsigned int bestppm = 1000000; 952 /* min update 19.2 MHz */ 953 int max_n = min(limit->n.max, refclk / 19200); 954 bool found = false; 955 956 target *= 5; /* fast clock */ 957 958 memset(best_clock, 0, sizeof(*best_clock)); 959 960 /* based on hardware requirement, prefer smaller n to precision */ 961 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 962 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 963 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 964 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 965 clock.p = clock.p1 * clock.p2; 966 /* based on hardware requirement, prefer bigger m1,m2 values */ 967 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 968 unsigned int ppm; 969 970 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 971 refclk * clock.m1); 972 973 vlv_calc_dpll_params(refclk, &clock); 974 975 if (!intel_PLL_is_valid(dev, limit, 976 &clock)) 977 continue; 978 979 if (!vlv_PLL_is_optimal(dev, target, 980 &clock, 981 best_clock, 982 bestppm, &ppm)) 983 continue; 984 985 *best_clock = clock; 986 bestppm = ppm; 987 found = true; 988 } 989 } 990 } 991 } 992 993 return found; 994 } 995 996 static bool 997 chv_find_best_dpll(const intel_limit_t *limit, 998 struct intel_crtc_state *crtc_state, 999 int target, int refclk, intel_clock_t *match_clock, 1000 intel_clock_t *best_clock) 1001 { 1002 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 1003 struct drm_device *dev = crtc->base.dev; 1004 unsigned int best_error_ppm; 1005 intel_clock_t clock; 1006 uint64_t m2; 1007 int found = false; 1008 1009 memset(best_clock, 0, sizeof(*best_clock)); 1010 best_error_ppm = 1000000; 1011 1012 /* 1013 * Based on hardware doc, the n always set to 1, and m1 always 1014 * set to 2. If requires to support 200Mhz refclk, we need to 1015 * revisit this because n may not 1 anymore. 1016 */ 1017 clock.n = 1, clock.m1 = 2; 1018 target *= 5; /* fast clock */ 1019 1020 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 1021 for (clock.p2 = limit->p2.p2_fast; 1022 clock.p2 >= limit->p2.p2_slow; 1023 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 1024 unsigned int error_ppm; 1025 1026 clock.p = clock.p1 * clock.p2; 1027 1028 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 1029 clock.n) << 22, refclk * clock.m1); 1030 1031 if (m2 > INT_MAX/clock.m1) 1032 continue; 1033 1034 clock.m2 = m2; 1035 1036 chv_calc_dpll_params(refclk, &clock); 1037 1038 if (!intel_PLL_is_valid(dev, limit, &clock)) 1039 continue; 1040 1041 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 1042 best_error_ppm, &error_ppm)) 1043 continue; 1044 1045 *best_clock = clock; 1046 best_error_ppm = error_ppm; 1047 found = true; 1048 } 1049 } 1050 1051 return found; 1052 } 1053 1054 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 1055 intel_clock_t *best_clock) 1056 { 1057 int refclk = i9xx_get_refclk(crtc_state, 0); 1058 1059 return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state, 1060 target_clock, refclk, NULL, best_clock); 1061 } 1062 1063 bool intel_crtc_active(struct drm_crtc *crtc) 1064 { 1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1066 1067 /* Be paranoid as we can arrive here with only partial 1068 * state retrieved from the hardware during setup. 1069 * 1070 * We can ditch the adjusted_mode.crtc_clock check as soon 1071 * as Haswell has gained clock readout/fastboot support. 1072 * 1073 * We can ditch the crtc->primary->fb check as soon as we can 1074 * properly reconstruct framebuffers. 1075 * 1076 * FIXME: The intel_crtc->active here should be switched to 1077 * crtc->state->active once we have proper CRTC states wired up 1078 * for atomic. 
/*
 * pipe_dsl_stopped - check whether a pipe's display scan line has stopped
 *
 * Samples PIPEDSL twice, 5 ms apart; if the (masked) scan line value did not
 * change the pipe is considered stopped. Used on pre-gen4 hardware where
 * there is no pipe-state bit to poll (see intel_wait_for_pipe_off()).
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* The scan line field is narrower on gen2. */
	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
/* Human-readable form of an on/off state for assertion messages. */
static const char *state_string(bool enabled)
{
	if (enabled)
		return "on";

	return "off";
}
/*
 * assert_fdi_tx - state-check the FDI transmitter for a pipe
 *
 * Warns (I915_STATE_WARN) if the FDI TX enable state does not match @state.
 * On DDI platforms there is no dedicated FDI_TX register, so the transcoder
 * DDI function-control enable bit is used as the equivalent.
 */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
	     state_string(state), state_string(cur_state));
}
/*
 * assert_fdi_tx_pll_enabled - warn if the FDI TX PLL is not running
 *
 * No-op on ILK (gen 5), where the FDI PLL is always enabled, and on DDI
 * platforms, where the DDI ports own the FDI PLL setup; otherwise checks
 * the FDI_TX_PLL_ENABLE bit for @pipe.
 */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
/*
 * assert_cursor - state-check the hardware cursor on a pipe
 *
 * Warns if the cursor enable state does not match @state. On 845G/I865G
 * there is a single cursor control register (read at PIPE_A) with a plain
 * enable bit; everywhere else the per-pipe register's mode field being
 * non-zero means the cursor is on.
 */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
/*
 * assert_planes_disabled - warn if any display plane still feeds @pipe
 *
 * On gen4+ primary planes are hard-wired to their pipe, so only @pipe's own
 * DSPCNTR needs checking. On older hardware a plane can be routed to either
 * pipe, so every plane's pipe-select field is checked against @pipe.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}
/*
 * assert_vblank_disabled - warn if vblank interrupts are still enabled
 *
 * drm_crtc_vblank_get() returning 0 means a vblank reference was acquired,
 * i.e. vblanks are (or can be) enabled — which is the failure case here.
 * The matching put releases the reference just taken so the count stays
 * balanced.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
/*
 * hdmi_pipe_enabled - does this HDMI/SDVO port control value drive @pipe?
 *
 * @val is a snapshot of the port register. Returns false if the port is
 * disabled; otherwise compares the pipe-select field against @pipe using
 * the field layout appropriate for the platform (CPT PCH, CHV, or legacy).
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}
/*
 * vlv_enable_pll - enable the DPLL for a VLV pipe
 *
 * Programs the DPLL from the precomputed @pipe_config->dpll_hw_state, waits
 * for lock, writes DPLL_MD, then rewrites the DPLL three more times with a
 * 150 us warm-up delay after each write (historical "three times for luck"
 * sequence carried over from the i9xx path). The pipe must be disabled.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
/*
 * i9xx_enable_pll - enable the DPLL on pre-ILK hardware
 *
 * Programs and warms up the DPLL from crtc->config->dpll_hw_state. Handles
 * two quirks: the I830 DVO 2x clock must be set on both PLLs, and VGA mode
 * must be enabled (register written as 0 first) before the P1/P2 dividers
 * change. The pipe must be disabled when this is called.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	/*
	 * Disable DVO 2x clock on both PLLs if necessary: I830 sets this
	 * bit on both PLLs (see i9xx_enable_pll()), so it is only cleared
	 * once the last DVO pipe has gone away.
	 */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/*
	 * Don't disable pipe or pipe PLLs if needed: the PIPEA/PIPEB force
	 * quirks require the pipe (and hence its PLL) to stay running.
	 */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Leave only VGA-mode-disable set; everything else (incl. enable) off. */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
/*
 * intel_prepare_shared_dpll - program a shared DPLL before first use
 *
 * If this crtc's shared DPLL is not yet active anywhere (active == 0),
 * verify it is really off and run its mode_set hook to program the divider
 * state. Warns if the crtc has no shared DPLL assigned, or if the PLL's
 * crtc_mask is empty (it should already list at least this crtc).
 */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}
%d) for crtc %d\n", 1901 pll->name, pll->active, pll->on, 1902 crtc->base.base.id); 1903 1904 if (pll->active++) { 1905 WARN_ON(!pll->on); 1906 assert_shared_dpll_enabled(dev_priv, pll); 1907 return; 1908 } 1909 WARN_ON(pll->on); 1910 1911 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1912 1913 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1914 pll->enable(dev_priv, pll); 1915 pll->on = true; 1916 } 1917 1918 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1919 { 1920 struct drm_device *dev = crtc->base.dev; 1921 struct drm_i915_private *dev_priv = dev->dev_private; 1922 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1923 1924 /* PCH only available on ILK+ */ 1925 if (INTEL_INFO(dev)->gen < 5) 1926 return; 1927 1928 if (pll == NULL) 1929 return; 1930 1931 if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base))))) 1932 return; 1933 1934 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1935 pll->name, pll->active, pll->on, 1936 crtc->base.base.id); 1937 1938 if (WARN_ON(pll->active == 0)) { 1939 assert_shared_dpll_disabled(dev_priv, pll); 1940 return; 1941 } 1942 1943 assert_shared_dpll_enabled(dev_priv, pll); 1944 WARN_ON(!pll->on); 1945 if (--pll->active) 1946 return; 1947 1948 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1949 pll->disable(dev_priv, pll); 1950 pll->on = false; 1951 1952 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1953 } 1954 1955 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1956 enum i915_pipe pipe) 1957 { 1958 struct drm_device *dev = dev_priv->dev; 1959 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1960 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1961 i915_reg_t reg; 1962 uint32_t val, pipeconf_val; 1963 1964 /* PCH only available on ILK+ */ 1965 BUG_ON(!HAS_PCH_SPLIT(dev)); 1966 1967 /* Make sure PCH DPLL is enabled */ 1968 assert_shared_dpll_enabled(dev_priv, 1969 intel_crtc_to_shared_dpll(intel_crtc)); 1970 
1971 /* FDI must be feeding us bits for PCH ports */ 1972 assert_fdi_tx_enabled(dev_priv, pipe); 1973 assert_fdi_rx_enabled(dev_priv, pipe); 1974 1975 if (HAS_PCH_CPT(dev)) { 1976 /* Workaround: Set the timing override bit before enabling the 1977 * pch transcoder. */ 1978 reg = TRANS_CHICKEN2(pipe); 1979 val = I915_READ(reg); 1980 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1981 I915_WRITE(reg, val); 1982 } 1983 1984 reg = PCH_TRANSCONF(pipe); 1985 val = I915_READ(reg); 1986 pipeconf_val = I915_READ(PIPECONF(pipe)); 1987 1988 if (HAS_PCH_IBX(dev_priv->dev)) { 1989 /* 1990 * Make the BPC in transcoder be consistent with 1991 * that in pipeconf reg. For HDMI we must use 8bpc 1992 * here for both 8bpc and 12bpc. 1993 */ 1994 val &= ~PIPECONF_BPC_MASK; 1995 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) 1996 val |= PIPECONF_8BPC; 1997 else 1998 val |= pipeconf_val & PIPECONF_BPC_MASK; 1999 } 2000 2001 val &= ~TRANS_INTERLACE_MASK; 2002 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 2003 if (HAS_PCH_IBX(dev_priv->dev) && 2004 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 2005 val |= TRANS_LEGACY_INTERLACED_ILK; 2006 else 2007 val |= TRANS_INTERLACED; 2008 else 2009 val |= TRANS_PROGRESSIVE; 2010 2011 I915_WRITE(reg, val | TRANS_ENABLE); 2012 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2013 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 2014 } 2015 2016 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 2017 enum transcoder cpu_transcoder) 2018 { 2019 u32 val, pipeconf_val; 2020 2021 /* PCH only available on ILK+ */ 2022 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); 2023 2024 /* FDI must be feeding us bits for PCH ports */ 2025 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 2026 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2027 2028 /* Workaround: set timing override bit. 
 */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	/* Mirror the CPU transcoder's interlace setting. */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

/*
 * ironlake_disable_pch_transcoder - disable the PCH transcoder for @pipe
 *
 * Requires FDI and the PCH ports to be off already; clears TRANS_ENABLE
 * and waits (up to 50ms) for the transcoder to report disabled, then
 * undoes the CPT timing-override workaround.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

/*
 * lpt_disable_pch_transcoder - disable the single LPT PCH transcoder
 *
 * Clears TRANS_ENABLE in LPT_TRANSCONF, waits (up to 50ms) for the
 * transcoder to report disabled, and clears the timing-override
 * workaround bit.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* On LPT the PCH side always uses transcoder A. */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/*
		 * Pipe already on: only legal when one of the force-pipe
		 * quirks is deliberately keeping it enabled.
		 */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for the off transition if we actually cleared the bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

/*
 * need_vtd_wa - does this platform need the VT-d scanout workaround?
 *
 * True on gen6+ when the IOMMU reports graphics mappings are active
 * (CONFIG_INTEL_IOMMU builds only); callers then bump the scanout
 * alignment to 256KiB.
 */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/*
 * intel_tile_height - tile height in rows for a fb modifier
 * @dev: drm device (used to distinguish gen2 X-tiling)
 * @pixel_format: DRM fourcc, used to look up bytes-per-pixel for Yf
 * @fb_format_modifier: DRM_FORMAT_MOD_NONE or an I915_FORMAT_MOD_* value
 * @plane: color plane index passed to drm_format_plane_cpp()
 *
 * Linear buffers report a height of 1 row; Yf tile height depends on the
 * pixel size.  Unknown modifiers warn via MISSING_CASE and fall back to 1.
 */
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
		  uint64_t fb_format_modifier, unsigned int plane)
{
	unsigned int tile_height;
	uint32_t pixel_bytes;

	switch (fb_format_modifier) {
	case DRM_FORMAT_MOD_NONE:
		tile_height = 1;
		break;
	case I915_FORMAT_MOD_X_TILED:
		tile_height = IS_GEN2(dev) ? 16 : 8;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		tile_height = 32;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		pixel_bytes = drm_format_plane_cpp(pixel_format, plane);
		switch (pixel_bytes) {
		default:
		case 1:
			tile_height = 64;
			break;
		case 2:
		case 4:
			tile_height = 32;
			break;
		case 8:
			tile_height = 16;
			break;
		case 16:
			WARN_ONCE(1,
				  "128-bit pixels are not supported for display!");
			tile_height = 16;
			break;
		}
		break;
	default:
		MISSING_CASE(fb_format_modifier);
		tile_height = 1;
		break;
	}

	return tile_height;
}

/*
 * intel_fb_align_height - round @height up to a whole number of tiles
 * for the given format/modifier combination.
 */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_format_modifier)
{
	return ALIGN(height, intel_tile_height(dev, pixel_format,
					       fb_format_modifier, 0));
}

/*
 * intel_fill_fb_ggtt_view - choose the GGTT view for scanning out @fb
 *
 * Defaults to the normal view; for 90/270-degree plane rotation fills in
 * the rotated view's geometry (page counts per plane) from the fb.  NV12
 * additionally gets the UV plane's page geometry.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->params.rotation_info;
	unsigned int tile_height, tile_pitch;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	tile_height = intel_tile_height(fb->dev, fb->pixel_format,
					fb->modifier[0], 0);
	tile_pitch = PAGE_SIZE / tile_height;
	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * PAGE_SIZE;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* UV plane geometry: subsampled, so half the fb height. */
		tile_height = intel_tile_height(fb->dev, fb->pixel_format,
						fb->modifier[0], 1);
		tile_pitch = PAGE_SIZE / tile_height;
		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[0], tile_pitch);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2,
						     tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv *
				PAGE_SIZE;
	}
}

/*
 * intel_linear_alignment - GGTT alignment for linear scanout buffers
 *
 * Per-platform: 256KiB on gen9+, 128KiB on 965G/GM45-class and
 * VLV/CHV, 4KiB on other gen4+, no requirement (0) before that.
 */
static unsigned int intel_linear_alignment(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

/*
 * intel_pin_and_fence_fb_obj - pin @fb for scanout and install a fence
 *
 * Picks the alignment for the fb's tiling modifier, pins the backing
 * object into the display-plane GGTT view, and (for the normal view)
 * installs a fence register.  Must be called with struct_mutex held.
 *
 * Returns 0 on success; -EINVAL for bad modifiers, -EBUSY when fence
 * acquisition would deadlock (translated from -EDEADLK for atomic), or
 * the error from the pin/fence calls.  On error nothing stays pinned.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		alignment = intel_linear_alignment(dev_priv);
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			      "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * intel_unpin_fb_obj - undo intel_pin_and_fence_fb_obj()
 *
 * Releases the fence (normal view only) and unpins the object from the
 * display-plane GGTT view.  Must be called with struct_mutex held.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two. */
unsigned long intel_gen4_compute_page_offset(struct drm_i915_private *dev_priv,
					     int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* X-tile geometry: 8 rows per tile, 512 bytes per tile row. */
		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		/* Linear: align the byte offset down, fold remainder into x/y. */
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
		unsigned int offset;

		offset = *y * pitch + *x * cpp;
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
		return offset & ~alignment;
	}
}

/*
 * i9xx_format_to_fourcc - map a DISPPLANE_* pixel format field back to a
 * DRM fourcc; unknown values default to XRGB8888.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case
	     DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/*
 * skl_format_to_fourcc - map a PLANE_CTL_FORMAT_* value (plus channel
 * order and alpha flags) back to a DRM fourcc; unknown values default
 * to the XRGB_8888 handling.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/*
 * intel_alloc_initial_plane_obj - wrap the BIOS framebuffer in a GEM object
 *
 * Tries to reconstruct the firmware's scanout buffer as a preallocated
 * stolen-memory object and initialize @plane_config->fb around it.
 * Returns true on success, false when the fb is absent/too large or
 * stolen allocation / fb init fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj)
		return false;

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	/* Rebuild the fb metadata from what the BIOS left behind. */
	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

/*
 * intel_find_initial_plane_obj - take over the BIOS framebuffer for @intel_crtc
 *
 * First tries to wrap the firmware fb via intel_alloc_initial_plane_obj();
 * failing that, looks for another active crtc already scanning out from the
 * same GGTT base and shares its fb.  If neither works, the primary plane is
 * disabled so we don't keep a visible plane with a NULL fb.  On success the
 * plane/crtc state is fixed up to point at the found fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-fb, unscaled scanout: src is 16.16 fixed point. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}

/*
 * i9xx_update_primary_plane - program the gen2-4 primary plane registers
 *
 * Writes DSPCNTR/DSPSTRIDE/offset/surface registers for @fb at (@x, @y);
 * an invisible plane or NULL fb disables the plane instead.
 */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		/* Disable the plane and clear its surface address. */
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B uses the PRIM* register variants. */
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-filtered by the plane's format list. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Gen4+ scans out from a page-aligned base tile offset. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(dev_priv,
						       &x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/*
 * ironlake_update_primary_plane - program the ILK+ primary plane registers
 *
 * Same contract as i9xx_update_primary_plane() for ILK through BDW
 * (HSW/BDW use pipe CSC and DSPOFFSET instead of tile/linear offsets).
 */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats are pre-filtered by the plane's format list. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(dev_priv,
					       &x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the 180-degree offset in hardware. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/*
 * intel_fb_stride_alignment - required stride unit for a fb modifier
 */
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
			      uint32_t pixel_format)
{
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;

	/*
	 * The stride is either expressed as a multiple of 64 bytes
	 * chunks for linear buffers or in number of tiles for tiled
	 * buffers.
	 */
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return 64;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen == 2)
			return 128;
		return 512;
	case I915_FORMAT_MOD_Y_TILED:
		/* No need to check for old gens and Y tiling since this is
		 * about the display engine and those will be blocked before
		 * we get here.
		 */
		return 128;
	case I915_FORMAT_MOD_Yf_TILED:
		if (bits_per_pixel == 8)
			return 64;
		else
			return 128;
	default:
		MISSING_CASE(fb_modifier);
		return 64;
	}
}

/*
 * intel_plane_obj_offset - GGTT offset of @obj for scanout by @intel_plane
 * @plane: color plane index; 1 selects the UV plane of a rotated NV12 view
 *
 * Looks up the vma for the plane's current GGTT view.  Returns the low
 * 32 bits of the offset (warns if the upper bits are set), or -1
 * (0xffffffff) if the vma is missing.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		 view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotation_info.uv_start_page *
			  PAGE_SIZE;
	}

	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}

/* Turn off (unbind) a single SKL pipe scaler. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

/*
 * skl_plane_ctl_format - PLANE_CTL format bits for a DRM fourcc
 * (0 with a MISSING_CASE warning for unsupported formats).
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

/*
 * skl_plane_ctl_tiling - PLANE_CTL tiling bits for a fb modifier
 * (linear yields 0; unknown modifiers warn and yield 0).
 */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/*
 * skl_plane_ctl_rotation - PLANE_CTL rotation bits for a DRM rotation
 */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case BIT(DRM_ROTATE_0):
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case BIT(DRM_ROTATE_90):
		return PLANE_CTL_ROTATE_270;
	case BIT(DRM_ROTATE_180):
		return PLANE_CTL_ROTATE_180;
	case BIT(DRM_ROTATE_270):
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

/*
 * skylake_update_primary_plane - program the SKL+ universal plane 0
 *
 * Builds PLANE_CTL from the fb's format/tiling/rotation, computes the
 * offset/stride geometry (rotated views use height-in-tiles as stride),
 * optionally binds a pipe scaler, and writes the surface address.
 * An invisible plane or NULL fb disables the plane instead.
 */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;

	plane_state = to_intel_plane_state(plane->state);

	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
WARN_ON(drm_rect_width(&plane_state->src) == 0); 3122 3123 scaler_id = plane_state->scaler_id; 3124 src_x = plane_state->src.x1 >> 16; 3125 src_y = plane_state->src.y1 >> 16; 3126 src_w = drm_rect_width(&plane_state->src) >> 16; 3127 src_h = drm_rect_height(&plane_state->src) >> 16; 3128 dst_x = plane_state->dst.x1; 3129 dst_y = plane_state->dst.y1; 3130 dst_w = drm_rect_width(&plane_state->dst); 3131 dst_h = drm_rect_height(&plane_state->dst); 3132 3133 WARN_ON(x != src_x || y != src_y); 3134 3135 if (intel_rotation_90_or_270(rotation)) { 3136 /* stride = Surface height in tiles */ 3137 tile_height = intel_tile_height(dev, fb->pixel_format, 3138 fb->modifier[0], 0); 3139 stride = DIV_ROUND_UP(fb->height, tile_height); 3140 x_offset = stride * tile_height - y - src_h; 3141 y_offset = x; 3142 plane_size = (src_w - 1) << 16 | (src_h - 1); 3143 } else { 3144 stride = fb->pitches[0] / stride_div; 3145 x_offset = x; 3146 y_offset = y; 3147 plane_size = (src_h - 1) << 16 | (src_w - 1); 3148 } 3149 plane_offset = y_offset << 16 | x_offset; 3150 3151 intel_crtc->adjusted_x = x_offset; 3152 intel_crtc->adjusted_y = y_offset; 3153 3154 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); 3155 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset); 3156 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size); 3157 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 3158 3159 if (scaler_id >= 0) { 3160 uint32_t ps_ctrl = 0; 3161 3162 WARN_ON(!dst_w || !dst_h); 3163 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) | 3164 crtc_state->scaler_state.scalers[scaler_id].mode; 3165 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3166 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3167 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); 3168 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3169 I915_WRITE(PLANE_POS(pipe, 0), 0); 3170 } else { 3171 I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x); 3172 } 3173 3174 I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); 3175 3176 
POSTING_READ(PLANE_SURF(pipe, 0)); 3177 } 3178 3179 /* Assume fb object is pinned & idle & fenced and just update base pointers */ 3180 static int 3181 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 3182 int x, int y, enum mode_set_atomic state) 3183 { 3184 struct drm_device *dev = crtc->dev; 3185 struct drm_i915_private *dev_priv = dev->dev_private; 3186 3187 if (dev_priv->fbc.deactivate) 3188 dev_priv->fbc.deactivate(dev_priv); 3189 3190 dev_priv->display.update_primary_plane(crtc, fb, x, y); 3191 3192 return 0; 3193 } 3194 3195 static void intel_complete_page_flips(struct drm_device *dev) 3196 { 3197 struct drm_crtc *crtc; 3198 3199 for_each_crtc(dev, crtc) { 3200 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3201 enum plane plane = intel_crtc->plane; 3202 3203 intel_prepare_page_flip(dev, plane); 3204 intel_finish_page_flip_plane(dev, plane); 3205 } 3206 } 3207 3208 static void intel_update_primary_planes(struct drm_device *dev) 3209 { 3210 struct drm_crtc *crtc; 3211 3212 for_each_crtc(dev, crtc) { 3213 struct intel_plane *plane = to_intel_plane(crtc->primary); 3214 struct intel_plane_state *plane_state; 3215 3216 drm_modeset_lock_crtc(crtc, &plane->base); 3217 plane_state = to_intel_plane_state(plane->base.state); 3218 3219 if (crtc->state->active && plane_state->base.fb) 3220 plane->commit_plane(&plane->base, plane_state); 3221 3222 drm_modeset_unlock_crtc(crtc); 3223 } 3224 } 3225 3226 void intel_prepare_reset(struct drm_device *dev) 3227 { 3228 /* no reset support for gen2 */ 3229 if (IS_GEN2(dev)) 3230 return; 3231 3232 /* reset doesn't touch the display */ 3233 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 3234 return; 3235 3236 drm_modeset_lock_all(dev); 3237 /* 3238 * Disabling the crtcs gracefully seems nicer. Also the 3239 * g33 docs say we should at least disable all the planes. 
3240 */ 3241 intel_display_suspend(dev); 3242 } 3243 3244 void intel_finish_reset(struct drm_device *dev) 3245 { 3246 struct drm_i915_private *dev_priv = to_i915(dev); 3247 3248 /* 3249 * Flips in the rings will be nuked by the reset, 3250 * so complete all pending flips so that user space 3251 * will get its events and not get stuck. 3252 */ 3253 intel_complete_page_flips(dev); 3254 3255 /* no reset support for gen2 */ 3256 if (IS_GEN2(dev)) 3257 return; 3258 3259 /* reset doesn't touch the display */ 3260 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { 3261 /* 3262 * Flips in the rings have been nuked by the reset, 3263 * so update the base address of all primary 3264 * planes to the the last fb to make sure we're 3265 * showing the correct fb after a reset. 3266 * 3267 * FIXME: Atomic will make this obsolete since we won't schedule 3268 * CS-based flips (which might get lost in gpu resets) any more. 3269 */ 3270 intel_update_primary_planes(dev); 3271 return; 3272 } 3273 3274 /* 3275 * The display has been reset as well, 3276 * so need a full re-initialization. 
3277 */ 3278 intel_runtime_pm_disable_interrupts(dev_priv); 3279 intel_runtime_pm_enable_interrupts(dev_priv); 3280 3281 intel_modeset_init_hw(dev); 3282 3283 spin_lock_irq(&dev_priv->irq_lock); 3284 if (dev_priv->display.hpd_irq_setup) 3285 dev_priv->display.hpd_irq_setup(dev); 3286 spin_unlock_irq(&dev_priv->irq_lock); 3287 3288 intel_display_resume(dev); 3289 3290 intel_hpd_init(dev_priv); 3291 3292 drm_modeset_unlock_all(dev); 3293 } 3294 3295 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3296 { 3297 struct drm_device *dev = crtc->dev; 3298 struct drm_i915_private *dev_priv = dev->dev_private; 3299 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3300 bool pending; 3301 3302 if (i915_reset_in_progress(&dev_priv->gpu_error) || 3303 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 3304 return false; 3305 3306 spin_lock_irq(&dev->event_lock); 3307 pending = to_intel_crtc(crtc)->unpin_work != NULL; 3308 spin_unlock_irq(&dev->event_lock); 3309 3310 return pending; 3311 } 3312 3313 static void intel_update_pipe_config(struct intel_crtc *crtc, 3314 struct intel_crtc_state *old_crtc_state) 3315 { 3316 struct drm_device *dev = crtc->base.dev; 3317 struct drm_i915_private *dev_priv = dev->dev_private; 3318 struct intel_crtc_state *pipe_config = 3319 to_intel_crtc_state(crtc->base.state); 3320 3321 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3322 crtc->base.mode = crtc->base.state->mode; 3323 3324 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", 3325 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, 3326 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3327 3328 if (HAS_DDI(dev)) 3329 intel_set_pipe_csc(&crtc->base); 3330 3331 /* 3332 * Update pipe size and adjust fitter if needed: the reason for this is 3333 * that in compute_mode_changes we check the native mode (not the pfit 3334 * mode) to see if we can flip rather than do a full mode set. 
In the 3335 * fastboot case, we'll flip, but if we don't update the pipesrc and 3336 * pfit state, we'll end up with a big fb scanned out into the wrong 3337 * sized surface. 3338 */ 3339 3340 I915_WRITE(PIPESRC(crtc->pipe), 3341 ((pipe_config->pipe_src_w - 1) << 16) | 3342 (pipe_config->pipe_src_h - 1)); 3343 3344 /* on skylake this is done by detaching scalers */ 3345 if (INTEL_INFO(dev)->gen >= 9) { 3346 skl_detach_scalers(crtc); 3347 3348 if (pipe_config->pch_pfit.enabled) 3349 skylake_pfit_enable(crtc); 3350 } else if (HAS_PCH_SPLIT(dev)) { 3351 if (pipe_config->pch_pfit.enabled) 3352 ironlake_pfit_enable(crtc); 3353 else if (old_crtc_state->pch_pfit.enabled) 3354 ironlake_pfit_disable(crtc, true); 3355 } 3356 } 3357 3358 static void intel_fdi_normal_train(struct drm_crtc *crtc) 3359 { 3360 struct drm_device *dev = crtc->dev; 3361 struct drm_i915_private *dev_priv = dev->dev_private; 3362 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3363 int pipe = intel_crtc->pipe; 3364 i915_reg_t reg; 3365 u32 temp; 3366 3367 /* enable normal train */ 3368 reg = FDI_TX_CTL(pipe); 3369 temp = I915_READ(reg); 3370 if (IS_IVYBRIDGE(dev)) { 3371 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3372 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3373 } else { 3374 temp &= ~FDI_LINK_TRAIN_NONE; 3375 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3376 } 3377 I915_WRITE(reg, temp); 3378 3379 reg = FDI_RX_CTL(pipe); 3380 temp = I915_READ(reg); 3381 if (HAS_PCH_CPT(dev)) { 3382 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3383 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3384 } else { 3385 temp &= ~FDI_LINK_TRAIN_NONE; 3386 temp |= FDI_LINK_TRAIN_NONE; 3387 } 3388 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3389 3390 /* wait one idle pattern time */ 3391 POSTING_READ(reg); 3392 udelay(1000); 3393 3394 /* IVB wants error correction enabled */ 3395 if (IS_IVYBRIDGE(dev)) 3396 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3397 FDI_FE_ERRC_ENABLE); 3398 } 3399 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* Poll for bit lock to confirm training pattern 1 succeeded. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* Write the bit back to ack/clear the status. */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Poll for symbol lock to confirm training pattern 2 succeeded. */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing / pre-emphasis levels tried in order during SNB training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Try each voltage/pre-emphasis level until bit lock is achieved. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Same level sweep as train 1, this time waiting for symbol lock. */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each level is attempted twice (see loop bound above). */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Re-read in case the lock bit arrived just after. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the FDI RX and TX PLLs for a pipe, with required warmup delays. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Mirror the pipe's BPC setting into the FDI RX control register. */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Counterpart of ironlake_fdi_pll_enable(): clock switch and PLL shutdown. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

/*
 * Disable the CPU FDI TX and PCH FDI RX for a pipe and leave the link
 * parked in training pattern 1, as required before re-training.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/*
 * Return true if any CRTC has framebuffer unpin work outstanding,
 * waiting one vblank on a CRTC whose flip has not yet completed.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/*
 * Finish a page flip on a CRTC: deliver the vblank event (if any),
 * drop the vblank reference, wake flip waiters and queue the deferred
 * unpin work.  Caller context: see the callers' event_lock usage.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/*
 * Wait (interruptibly, up to 60s) for a CRTC's pending page flip to
 * complete; on timeout the stuck flip is force-completed with a warning.
 * Returns 0 on success or a negative error from the interrupted wait.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

/* Gate the pixel clock and disable the iCLKIP SSC modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Copy the CPU transcoder timing registers (H/V total, blank, sync and
 * vsync shift) into the corresponding PCH transcoder registers, so the
 * PCH transcoder runs with the same timings as the CPU pipe feeding it.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/*
 * Set or clear the FDI B/C lane bifurcation chicken bit.  Both FDI B and
 * FDI C receivers must be disabled while the bit is flipped (the WARN_ONs
 * check that); no-op if the bit already has the requested value.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/*
 * Choose the FDI B/C bifurcation setting based on which pipe is being
 * enabled: pipe B needs bifurcation off only when using more than 2 FDI
 * lanes, pipe C always needs it on, pipe A never touches it.
 */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is not a declared enum port value; the caller in
	 * ironlake_pch_enable() BUG()s on anything but PORT_B/C/D, so this
	 * path presumably never triggers for DP configs — verify. */
	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* Route either PLL A or PLL B to this transcoder. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already uses the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
	 */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* Re-enable underrun reporting now that FDI training is done. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the DP port driving this pipe to the transcoder. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

/*
 * LPT counterpart of ironlake_pch_enable(): program iCLKIP, copy the
 * pipe timings into the (single) PCH transcoder and enable it.  LPT only
 * has transcoder A, hence the fixed TRANSCODER_A/PIPE_A arguments.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing.
*/ 4247 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 4248 4249 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4250 } 4251 4252 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, 4253 struct intel_crtc_state *crtc_state) 4254 { 4255 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 4256 struct intel_shared_dpll *pll; 4257 struct intel_shared_dpll_config *shared_dpll; 4258 enum intel_dpll_id i; 4259 int max = dev_priv->num_shared_dpll; 4260 4261 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 4262 4263 if (HAS_PCH_IBX(dev_priv->dev)) { 4264 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 4265 i = (enum intel_dpll_id) crtc->pipe; 4266 pll = &dev_priv->shared_dplls[i]; 4267 4268 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4269 crtc->base.base.id, pll->name); 4270 4271 WARN_ON(shared_dpll[i].crtc_mask); 4272 4273 goto found; 4274 } 4275 4276 if (IS_BROXTON(dev_priv->dev)) { 4277 /* PLL is attached to port in bxt */ 4278 struct intel_encoder *encoder; 4279 struct intel_digital_port *intel_dig_port; 4280 4281 encoder = intel_ddi_get_crtc_new_encoder(crtc_state); 4282 if (WARN_ON(!encoder)) 4283 return NULL; 4284 4285 intel_dig_port = enc_to_dig_port(&encoder->base); 4286 /* 1:1 mapping between ports and PLLs */ 4287 i = (enum intel_dpll_id)intel_dig_port->port; 4288 pll = &dev_priv->shared_dplls[i]; 4289 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4290 crtc->base.base.id, pll->name); 4291 WARN_ON(shared_dpll[i].crtc_mask); 4292 4293 goto found; 4294 } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) 4295 /* Do not consider SPLL */ 4296 max = 2; 4297 4298 for (i = 0; i < max; i++) { 4299 pll = &dev_priv->shared_dplls[i]; 4300 4301 /* Only want to check enabled timings first */ 4302 if (shared_dpll[i].crtc_mask == 0) 4303 continue; 4304 4305 if (memcmp(&crtc_state->dpll_hw_state, 4306 &shared_dpll[i].hw_state, 4307 sizeof(crtc_state->dpll_hw_state)) == 0) { 4308 
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", 4309 crtc->base.base.id, pll->name, 4310 shared_dpll[i].crtc_mask, 4311 pll->active); 4312 goto found; 4313 } 4314 } 4315 4316 /* Ok no matching timings, maybe there's a free one? */ 4317 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4318 pll = &dev_priv->shared_dplls[i]; 4319 if (shared_dpll[i].crtc_mask == 0) { 4320 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 4321 crtc->base.base.id, pll->name); 4322 goto found; 4323 } 4324 } 4325 4326 return NULL; 4327 4328 found: 4329 if (shared_dpll[i].crtc_mask == 0) 4330 shared_dpll[i].hw_state = 4331 crtc_state->dpll_hw_state; 4332 4333 crtc_state->shared_dpll = i; 4334 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 4335 pipe_name(crtc->pipe)); 4336 4337 shared_dpll[i].crtc_mask |= 1 << crtc->pipe; 4338 4339 return pll; 4340 } 4341 4342 static void intel_shared_dpll_commit(struct drm_atomic_state *state) 4343 { 4344 struct drm_i915_private *dev_priv = to_i915(state->dev); 4345 struct intel_shared_dpll_config *shared_dpll; 4346 struct intel_shared_dpll *pll; 4347 enum intel_dpll_id i; 4348 4349 if (!to_intel_atomic_state(state)->dpll_set) 4350 return; 4351 4352 shared_dpll = to_intel_atomic_state(state)->shared_dpll; 4353 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4354 pll = &dev_priv->shared_dplls[i]; 4355 pll->config = shared_dpll[i]; 4356 } 4357 } 4358 4359 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4360 { 4361 struct drm_i915_private *dev_priv = dev->dev_private; 4362 i915_reg_t dslreg = PIPEDSL(pipe); 4363 u32 temp; 4364 4365 temp = I915_READ(dslreg); 4366 udelay(500); 4367 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4368 if (wait_for(I915_READ(dslreg) != temp, 5)) 4369 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4370 } 4371 } 4372 4373 static int 4374 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4375 unsigned scaler_user, int *scaler_id, unsigned int 
		  rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* With 90/270 rotation the source is compared against the swapped
	 * destination dimensions. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	/* The crtc scaler (panel fitter) maps the pipe source size onto the
	 * active display timings; detach when the crtc is inactive. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, DRM_ROTATE_0,
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* A plane with no fb or not visible does not need a scaler. */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	/* src coordinates are 16.16 fixed point, hence the >> 16. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id,
			      fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}

/* Detach every scaler on this crtc (registers only, no state tracking). */
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

/*
 * Program the SKL pipe scaler assigned to this crtc as a panel fitter
 * (pch_pfit) using the previously allocated scaler id; no-op when the
 * panel fitter is not enabled in the crtc state.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been reserved at check time. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}

/*
 * Program the ILK-style panel fitter for this pipe, if enabled in the
 * crtc state.
 */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

/*
 * Enable IPS (Intermediate Pixel Storage) for this crtc, if the crtc
 * state asks for it.  On BDW this goes through the pcode mailbox, on HSW
 * through IPS_CTL directly.  May sleep (vblank wait, mutex).
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read.
		 */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

/*
 * Disable IPS for this crtc.  Mirror of hsw_enable_ips(); no-op when the
 * crtc state never had IPS enabled.  May sleep.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 LUT entries as packed 8-bit R/G/B values. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

/*
 * Turn the legacy overlay off (non-interruptibly) if this crtc has one.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}

/*
 * Run the deferred work recorded in crtc->atomic after the plane update
 * has been committed (vblank wait, frontbuffer flip, watermarks, FBC,
 * primary-plane post-enable), then clear the scratch state.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;

	if (atomic->wait_vblank)
		intel_wait_for_vblank(dev, crtc->pipe);

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->wm_changed && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_update(crtc);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	/* crtc->atomic is single-use scratch state; reset it for the next
	 * commit. */
	memset(atomic, 0, sizeof(*atomic));
}

/*
 * Counterpart of intel_post_plane_update(): work that must happen before
 * the planes are touched (FBC/IPS disable, primary pre-disable, cxsr and
 * watermark updates), driven by the flags staged in crtc->atomic.
 */
static void intel_pre_plane_update(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	if (atomic->disable_fbc)
		intel_fbc_deactivate(crtc);

	if (crtc->atomic.disable_ips)
		hsw_disable_ips(crtc);

	if (atomic->pre_disable_primary)
		intel_pre_disable_primary(&crtc->base);

	if (pipe_config->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;
		intel_set_memory_cxsr(dev_priv, false);
	}

	if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
		intel_update_watermarks(&crtc->base);
}

/*
 * Disable every plane in @plane_mask on this crtc (plus the legacy
 * overlay) and flag a frontbuffer flip for the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

/*
 * Full modeset enable sequence for an ILK-style crtc: pipe timings,
 * M/N values, pipeconf, encoder pre_enable hooks, FDI PLL, panel fitter,
 * LUT, pipe enable, then the PCH side and the encoder enable hooks.
 * The ordering of these steps is dictated by the hardware enable
 * sequence and must not be rearranged.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	/* Suppress PCH underrun noise until the link is fully trained. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling.
		 */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_fbc_enable(intel_crtc);
}

/* IPS only exists on ULT machines and is tied to pipe A.
 */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * Full modeset enable sequence for HSW+ (DDI) crtcs.  Same general shape
 * as ironlake_crtc_enable() but with DDI/transcoder plumbing, optional
 * DSI bypasses, MST payload allocation and the HSW dual-vblank
 * workaround.  Step ordering follows the hardware enable sequence and
 * must not be rearranged.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/* LPT only has transcoder A, hence TRANSCODER_A here. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	/* Two deliberate vblank waits before re-arming underrun reporting. */
	if (intel_crtc->config->has_pch_encoder) {
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}

	intel_fbc_enable(intel_crtc);
}

/*
 * Disable the ILK panel fitter for this crtc.  When @force is false the
 * registers are only cleared if the fitter was actually in use.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right.
 */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

/*
 * Full modeset disable sequence for ILK-IVB pipes: encoders off ->
 * vblank off -> pipe off -> pfit off -> FDI off -> encoder post_disable ->
 * PCH transcoder off -> FDI PLL off. Ordering mirrors the enable path
 * in reverse and must be preserved.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_fbc_disable_crtc(intel_crtc);
}

/* Full modeset disable sequence for HSW+ (DDI) pipes; reverse of enable. */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	intel_fbc_disable_crtc(intel_crtc);
}

/* Program the GMCH panel fitter; only legal while the pipe is disabled. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private
	*dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain covering its lanes. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Map a DDI port to the power domain covering its AUX channel. */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}

/* Power domain needed to drive the given encoder's output port. */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Power domain needed for AUX transactions on the given encoder's port. */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}

/*
 * Bitmask of power domains the crtc needs in its current state:
 * pipe + transcoder (+ panel fitter when in use) + every attached port.
 * Returns 0 for an inactive crtc.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = intel_crtc->config->cpu_transcoder;

	if (!crtc->state->active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config->pch_pfit.enabled ||
	    intel_crtc->config->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

/*
 * Grab references for the domains newly needed by the crtc and return the
 * mask of domains that are no longer needed (for the caller to drop after
 * the modeset has committed).
 */
static unsigned long modeset_get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains = get_crtc_power_domains(crtc);

	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

static void
modeset_put_power_domains(struct drm_i915_private *dev_priv,
			  unsigned long domains)
{
	enum intel_display_power_domain domain;

	/* Drop one reference for every domain in the mask. */
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}

/*
 * Acquire the power domains needed by all crtcs undergoing a modeset,
 * commit any cdclk change, then release the domains no longer needed.
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (needs_modeset(crtc->state))
			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc);
	}

	if (dev_priv->display.modeset_commit_cdclk) {
		unsigned int cdclk = to_intel_atomic_state(state)->cdclk;

		if (cdclk != dev_priv->cdclk_freq &&
		    !WARN_ON(!state->allow_modeset))
			dev_priv->display.modeset_commit_cdclk(state);
	}

	for (i = 0; i < I915_MAX_PIPES; i++)
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
}

/*
 * Maximum supported pixel clock as a function of max cdclk. The guardband
 * percentages (90/95%) and the 2x factor for gen < 4 encode per-platform
 * dotclock-vs-cdclk constraints.
 */
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (INTEL_INFO(dev_priv)->gen >= 9 ||
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq*95/100;
	else if (INTEL_INFO(dev_priv)->gen < 4)
		return 2*max_cdclk_freq*90/100;
	else
		return max_cdclk_freq*90/100;
}

/* Determine the platform's maximum cdclk (and derived max dotclock). */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		/* SKL/KBL fuse off the upper cdclk bins via DFSM. */
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}

/* Re-read the current cdclk from hardware and cache it in dev_priv. */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
5477 */ 5478 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5479 } 5480 5481 if (dev_priv->max_cdclk_freq == 0) 5482 intel_update_max_cdclk(dev); 5483 } 5484 5485 static void broxton_set_cdclk(struct drm_device *dev, int frequency) 5486 { 5487 struct drm_i915_private *dev_priv = dev->dev_private; 5488 uint32_t divider; 5489 uint32_t ratio; 5490 uint32_t current_freq; 5491 int ret; 5492 5493 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5494 switch (frequency) { 5495 case 144000: 5496 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5497 ratio = BXT_DE_PLL_RATIO(60); 5498 break; 5499 case 288000: 5500 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5501 ratio = BXT_DE_PLL_RATIO(60); 5502 break; 5503 case 384000: 5504 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5505 ratio = BXT_DE_PLL_RATIO(60); 5506 break; 5507 case 576000: 5508 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5509 ratio = BXT_DE_PLL_RATIO(60); 5510 break; 5511 case 624000: 5512 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5513 ratio = BXT_DE_PLL_RATIO(65); 5514 break; 5515 case 19200: 5516 /* 5517 * Bypass frequency with DE PLL disabled. Init ratio, divider 5518 * to suppress GCC warning. 
5519 */ 5520 ratio = 0; 5521 divider = 0; 5522 break; 5523 default: 5524 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5525 5526 return; 5527 } 5528 5529 mutex_lock(&dev_priv->rps.hw_lock); 5530 /* Inform power controller of upcoming frequency change */ 5531 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5532 0x80000000); 5533 mutex_unlock(&dev_priv->rps.hw_lock); 5534 5535 if (ret) { 5536 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5537 ret, frequency); 5538 return; 5539 } 5540 5541 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5542 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5543 current_freq = current_freq * 500 + 1000; 5544 5545 /* 5546 * DE PLL has to be disabled when 5547 * - setting to 19.2MHz (bypass, PLL isn't used) 5548 * - before setting to 624MHz (PLL needs toggling) 5549 * - before setting to any frequency from 624MHz (PLL needs toggling) 5550 */ 5551 if (frequency == 19200 || frequency == 624000 || 5552 current_freq == 624000) { 5553 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5554 /* Timeout 200us */ 5555 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5556 1)) 5557 DRM_ERROR("timout waiting for DE PLL unlock\n"); 5558 } 5559 5560 if (frequency != 19200) { 5561 uint32_t val; 5562 5563 val = I915_READ(BXT_DE_PLL_CTL); 5564 val &= ~BXT_DE_PLL_RATIO_MASK; 5565 val |= ratio; 5566 I915_WRITE(BXT_DE_PLL_CTL, val); 5567 5568 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); 5569 /* Timeout 200us */ 5570 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) 5571 DRM_ERROR("timeout waiting for DE PLL lock\n"); 5572 5573 val = I915_READ(CDCLK_CTL); 5574 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; 5575 val |= divider; 5576 /* 5577 * Disable SSA Precharge when CD clock frequency < 500 MHz, 5578 * enable otherwise. 
5579 */ 5580 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5581 if (frequency >= 500000) 5582 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5583 5584 val &= ~CDCLK_FREQ_DECIMAL_MASK; 5585 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5586 val |= (frequency - 1000) / 500; 5587 I915_WRITE(CDCLK_CTL, val); 5588 } 5589 5590 mutex_lock(&dev_priv->rps.hw_lock); 5591 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5592 DIV_ROUND_UP(frequency, 25000)); 5593 mutex_unlock(&dev_priv->rps.hw_lock); 5594 5595 if (ret) { 5596 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5597 ret, frequency); 5598 return; 5599 } 5600 5601 intel_update_cdclk(dev); 5602 } 5603 5604 void broxton_init_cdclk(struct drm_device *dev) 5605 { 5606 struct drm_i915_private *dev_priv = dev->dev_private; 5607 uint32_t val; 5608 5609 /* 5610 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5611 * or else the reset will hang because there is no PCH to respond. 5612 * Move the handshake programming to initialization sequence. 5613 * Previously was left up to BIOS. 5614 */ 5615 val = I915_READ(HSW_NDE_RSTWRN_OPT); 5616 val &= ~RESET_PCH_HANDSHAKE_ENABLE; 5617 I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 5618 5619 /* Enable PG1 for cdclk */ 5620 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5621 5622 /* check if cd clock is enabled */ 5623 if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) { 5624 DRM_DEBUG_KMS("Display already initialized\n"); 5625 return; 5626 } 5627 5628 /* 5629 * FIXME: 5630 * - The initial CDCLK needs to be read from VBT. 5631 * Need to make this change after VBT has changes for BXT. 
5632 * - check if setting the max (or any) cdclk freq is really necessary 5633 * here, it belongs to modeset time 5634 */ 5635 broxton_set_cdclk(dev, 624000); 5636 5637 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5638 POSTING_READ(DBUF_CTL); 5639 5640 udelay(10); 5641 5642 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5643 DRM_ERROR("DBuf power enable timeout!\n"); 5644 } 5645 5646 void broxton_uninit_cdclk(struct drm_device *dev) 5647 { 5648 struct drm_i915_private *dev_priv = dev->dev_private; 5649 5650 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5651 POSTING_READ(DBUF_CTL); 5652 5653 udelay(10); 5654 5655 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5656 DRM_ERROR("DBuf power disable timeout!\n"); 5657 5658 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5659 broxton_set_cdclk(dev, 19200); 5660 5661 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5662 } 5663 5664 static const struct skl_cdclk_entry { 5665 unsigned int freq; 5666 unsigned int vco; 5667 } skl_cdclk_frequencies[] = { 5668 { .freq = 308570, .vco = 8640 }, 5669 { .freq = 337500, .vco = 8100 }, 5670 { .freq = 432000, .vco = 8640 }, 5671 { .freq = 450000, .vco = 8100 }, 5672 { .freq = 540000, .vco = 8100 }, 5673 { .freq = 617140, .vco = 8640 }, 5674 { .freq = 675000, .vco = 8100 }, 5675 }; 5676 5677 static unsigned int skl_cdclk_decimal(unsigned int freq) 5678 { 5679 return (freq - 1000) / 500; 5680 } 5681 5682 static unsigned int skl_cdclk_get_vco(unsigned int freq) 5683 { 5684 unsigned int i; 5685 5686 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { 5687 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; 5688 5689 if (e->freq == freq) 5690 return e->vco; 5691 } 5692 5693 return 8100; 5694 } 5695 5696 static void 5697 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5698 { 5699 unsigned int min_freq; 5700 u32 val; 5701 5702 /* select the minimum CDCLK before enabling DPLL 0 */ 5703 
val = I915_READ(CDCLK_CTL); 5704 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; 5705 val |= CDCLK_FREQ_337_308; 5706 5707 if (required_vco == 8640) 5708 min_freq = 308570; 5709 else 5710 min_freq = 337500; 5711 5712 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); 5713 5714 I915_WRITE(CDCLK_CTL, val); 5715 POSTING_READ(CDCLK_CTL); 5716 5717 /* 5718 * We always enable DPLL0 with the lowest link rate possible, but still 5719 * taking into account the VCO required to operate the eDP panel at the 5720 * desired frequency. The usual DP link rates operate with a VCO of 5721 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5722 * The modeset code is responsible for the selection of the exact link 5723 * rate later on, with the constraint of choosing a frequency that 5724 * works with required_vco. 5725 */ 5726 val = I915_READ(DPLL_CTRL1); 5727 5728 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5729 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5730 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5731 if (required_vco == 8640) 5732 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5733 SKL_DPLL0); 5734 else 5735 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 5736 SKL_DPLL0); 5737 5738 I915_WRITE(DPLL_CTRL1, val); 5739 POSTING_READ(DPLL_CTRL1); 5740 5741 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5742 5743 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5744 DRM_ERROR("DPLL0 not locked\n"); 5745 } 5746 5747 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5748 { 5749 int ret; 5750 u32 val; 5751 5752 /* inform PCU we want to change CDCLK */ 5753 val = SKL_CDCLK_PREPARE_FOR_CHANGE; 5754 mutex_lock(&dev_priv->rps.hw_lock); 5755 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); 5756 mutex_unlock(&dev_priv->rps.hw_lock); 5757 5758 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); 5759 } 5760 5761 static bool skl_cdclk_wait_for_pcu_ready(struct 
	drm_i915_private *dev_priv)
{
	unsigned int i;

	/* Poll the PCU up to 15 times, 10us apart (~150us budget). */
	for (i = 0; i < 15; i++) {
		if (skl_cdclk_pcu_ready(dev_priv))
			return true;
		udelay(10);
	}

	return false;
}

/*
 * Program the SKL cdclk to @freq (kHz): handshake with the PCU, write
 * CDCLK_CTL, then ack the new frequency band back to the PCU.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/* default deliberately shares the lowest band's settings */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}

/* Power down DBuf and disable DPLL0; reverse of skl_init_cdclk. */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/* disable DPLL0 */
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");
}

/* Bring up DPLL0 (if needed), restore the BIOS cdclk, power up DBuf. */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int required_vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

/*
 * Verify the BIOS-programmed cdclk state and reprogram it if broken.
 * Returns non-zero (true) when a reinit was needed, 0 (false) otherwise.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
5885 */ 5886 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) 5887 /* All well; nothing to sanitize */ 5888 return false; 5889 sanitize: 5890 /* 5891 * As of now initialize with max cdclk till 5892 * we get dynamic cdclk support 5893 * */ 5894 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq; 5895 skl_init_cdclk(dev_priv); 5896 5897 /* we did have to sanitize */ 5898 return true; 5899 } 5900 5901 /* Adjust CDclk dividers to allow high res or save power if possible */ 5902 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5903 { 5904 struct drm_i915_private *dev_priv = dev->dev_private; 5905 u32 val, cmd; 5906 5907 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5908 != dev_priv->cdclk_freq); 5909 5910 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ 5911 cmd = 2; 5912 else if (cdclk == 266667) 5913 cmd = 1; 5914 else 5915 cmd = 0; 5916 5917 mutex_lock(&dev_priv->rps.hw_lock); 5918 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5919 val &= ~DSPFREQGUAR_MASK; 5920 val |= (cmd << DSPFREQGUAR_SHIFT); 5921 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5922 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5923 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 5924 50)) { 5925 DRM_ERROR("timed out waiting for CDclk change\n"); 5926 } 5927 mutex_unlock(&dev_priv->rps.hw_lock); 5928 5929 mutex_lock(&dev_priv->sb_lock); 5930 5931 if (cdclk == 400000) { 5932 u32 divider; 5933 5934 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5935 5936 /* adjust cdclk divider */ 5937 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5938 val &= ~CCK_FREQUENCY_VALUES; 5939 val |= divider; 5940 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 5941 5942 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & 5943 CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 5944 50)) 5945 DRM_ERROR("timed out waiting for CDclk change\n"); 5946 } 5947 5948 /* 
adjust self-refresh exit latency value */ 5949 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC); 5950 val &= ~0x7f; 5951 5952 /* 5953 * For high bandwidth configs, we set a higher latency in the bunit 5954 * so that the core display fetch happens in time to avoid underruns. 5955 */ 5956 if (cdclk == 400000) 5957 val |= 4500 / 250; /* 4.5 usec */ 5958 else 5959 val |= 3000 / 250; /* 3.0 usec */ 5960 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); 5961 5962 mutex_unlock(&dev_priv->sb_lock); 5963 5964 intel_update_cdclk(dev); 5965 } 5966 5967 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) 5968 { 5969 struct drm_i915_private *dev_priv = dev->dev_private; 5970 u32 val, cmd; 5971 5972 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5973 != dev_priv->cdclk_freq); 5974 5975 switch (cdclk) { 5976 case 333333: 5977 case 320000: 5978 case 266667: 5979 case 200000: 5980 break; 5981 default: 5982 MISSING_CASE(cdclk); 5983 return; 5984 } 5985 5986 /* 5987 * Specs are full of misinformation, but testing on actual 5988 * hardware has shown that we just need to write the desired 5989 * CCK divider into the Punit register. 5990 */ 5991 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5992 5993 mutex_lock(&dev_priv->rps.hw_lock); 5994 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5995 val &= ~DSPFREQGUAR_MASK_CHV; 5996 val |= (cmd << DSPFREQGUAR_SHIFT_CHV); 5997 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5998 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5999 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 6000 50)) { 6001 DRM_ERROR("timed out waiting for CDclk change\n"); 6002 } 6003 mutex_unlock(&dev_priv->rps.hw_lock); 6004 6005 intel_update_cdclk(dev); 6006 } 6007 6008 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 6009 int max_pixclk) 6010 { 6011 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; 6012 int limit = IS_CHERRYVIEW(dev_priv) ? 
95 : 90; 6013 6014 /* 6015 * Really only a few cases to deal with, as only 4 CDclks are supported: 6016 * 200MHz 6017 * 267MHz 6018 * 320/333MHz (depends on HPLL freq) 6019 * 400MHz (VLV only) 6020 * So we check to see whether we're above 90% (VLV) or 95% (CHV) 6021 * of the lower bin and adjust if needed. 6022 * 6023 * We seem to get an unstable or solid color picture at 200MHz. 6024 * Not sure what's wrong. For now use 200MHz only when all pipes 6025 * are off. 6026 */ 6027 if (!IS_CHERRYVIEW(dev_priv) && 6028 max_pixclk > freq_320*limit/100) 6029 return 400000; 6030 else if (max_pixclk > 266667*limit/100) 6031 return freq_320; 6032 else if (max_pixclk > 0) 6033 return 266667; 6034 else 6035 return 200000; 6036 } 6037 6038 static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, 6039 int max_pixclk) 6040 { 6041 /* 6042 * FIXME: 6043 * - remove the guardband, it's not needed on BXT 6044 * - set 19.2MHz bypass frequency if there are no active pipes 6045 */ 6046 if (max_pixclk > 576000*9/10) 6047 return 624000; 6048 else if (max_pixclk > 384000*9/10) 6049 return 576000; 6050 else if (max_pixclk > 288000*9/10) 6051 return 384000; 6052 else if (max_pixclk > 144000*9/10) 6053 return 288000; 6054 else 6055 return 144000; 6056 } 6057 6058 /* Compute the max pixel clock for new configuration. Uses atomic state if 6059 * that's non-NULL, look at current state otherwise. 
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		/* Pulls the new state from @state, current state otherwise. */
		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

/*
 * Compute and stash (in the atomic state) the CDCLK required for @state
 * on VLV/CHV. Returns 0 or a negative error from state acquisition.
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);

	if (max_pixclk < 0)
		return max_pixclk;

	to_intel_atomic_state(state)->cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	return 0;
}

/* Broxton counterpart of valleyview_modeset_calc_cdclk(). */
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);

	if (max_pixclk < 0)
		return max_pixclk;

	to_intel_atomic_state(state)->cdclk =
		broxton_calc_cdclk(dev_priv, max_pixclk);

	return 0;
}

/*
 * Program the display PFI credits based on the current cdclk/czclk
 * relationship. Must be re-run after every CDCLK change.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}

/*
 * Commit-phase CDCLK reprogramming for VLV/CHV: applies the frequency
 * computed earlier by *_modeset_calc_cdclk() and refreshes PFI credits.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}

/*
 * Full modeset enable sequence for a pipe on VLV/CHV. The ordering of
 * the steps below (timings -> pipeconf -> pre_pll -> PLL -> pre_enable
 * -> pfit/LUT -> pipe -> vblank -> encoder enable) follows the hardware
 * bring-up requirements and must not be reordered.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B: disable blending / set canvas before enabling. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	/* DSI drives its own PLL; skip the display PLL in that case. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev)) {
			chv_prepare_pll(intel_crtc, intel_crtc->config);
			chv_enable_pll(intel_crtc, intel_crtc->config);
		} else {
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
			vlv_enable_pll(intel_crtc, intel_crtc->config);
		}
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}

/* Write the precomputed FP0/FP1 divider values for this pipe's DPLL. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

/*
 * Modeset enable sequence for a pipe on pre-VLV GMCH platforms.
 * Step ordering mirrors valleyview_crtc_enable() and is hardware
 * mandated; do not reorder.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting hardware. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_fbc_enable(intel_crtc);
}

/* Disable the GMCH panel fitter, if it was in use on this pipe. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	/* The pfit must only be touched while the pipe is off. */
	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/*
 * Modeset disable sequence for GMCH platforms: encoders off, pipe off,
 * pfit off, then PLL off. Mirrors i9xx/valleyview_crtc_enable() in
 * reverse order.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI owns its PLL; only touch the display PLL for non-DSI. */
	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_fbc_disable_crtc(intel_crtc);
}

/*
 * Force a crtc off outside of the atomic commit machinery (e.g. during
 * initial hw readout), updating our software tracking and dropping the
 * power domain references the crtc held.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		WARN_ON(intel_crtc->unpin_work);

		intel_pre_disable_primary(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx;
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	unsigned crtc_mask = 0;
	int ret = 0;

	if (WARN_ON(!ctx))
		return 0;

#if 0
	lockdep_assert_held(&ctx->ww_ctx);
#endif
	state = drm_atomic_state_alloc(dev);
	if (WARN_ON(!state))
		return -ENOMEM;

	state->acquire_ctx = ctx;
	state->allow_modeset = true;

	/* Build a state that turns every active crtc off. */
	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto free;

		if (!crtc_state->active)
			continue;

		crtc_state->active = false;
		crtc_mask |= 1 << drm_crtc_index(crtc);
	}

	if (crtc_mask) {
		ret = drm_atomic_commit(state);

		if (!ret) {
			/*
			 * Mark the previously-active crtcs active again in
			 * the software state only, so the paired
			 * intel_modeset_setup_hw_state() restores them.
			 */
			for_each_crtc(dev, crtc)
				if (crtc_mask & (1 << drm_crtc_index(crtc)))
					crtc->state->active = true;

			return ret;
		}
	}

free:
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	drm_atomic_state_free(state);
	return ret;
}

void intel_encoder_destroy(struct drm_encoder
					   *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		I915_STATE_WARN(!crtc,
				"connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
				"connector is active, but attached crtc isn't\n");

		/* MST connectors share encoders; skip the encoder checks. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
				"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
				"attached encoder crtc differs from connector crtc\n");
	} else {
		I915_STATE_WARN(crtc && crtc->state->active,
				"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
				"best encoder set without crtc!\n");
	}
}

/* Initialize the connector's atomic state. Returns 0 or -ENOMEM. */
int intel_connector_init(struct intel_connector *connector)
{
	drm_atomic_helper_connector_reset(&connector->base);

	if (!connector->base.state)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate and initialize an intel_connector. Returns NULL on
 * allocation failure; the caller owns the result.
 */
struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/* FDI lanes this crtc will consume (0 when it has no PCH encoder). */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

/*
 * Validate the requested FDI lane count for @pipe against the platform
 * limits and, on 3-pipe Ivybridge, against the lanes already claimed by
 * the pipe it shares an FDI link with. Returns 0 or -EINVAL (or a
 * negative error from atomic state acquisition).
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* Pipe B using >2 lanes requires pipe C's lanes to be free. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* Pipe C can only get lanes that pipe B isn't using. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
/*
 * Compute FDI lane count and m/n values for a PCH-attached pipe,
 * reducing pipe_bpp and retrying when the link hasn't enough
 * bandwidth. Returns 0, RETRY (caller must recompute), or -EINVAL.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	/* Out of bandwidth: drop bpp (down to a floor of 6 bpc) and retry. */
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}

/*
 * Whether IPS (Intermediate Pixel Storage) can be used with this pipe
 * config: bpp <= 24 and, on BDW, pixel rate within 95% of max cdclk.
 */
static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
				     struct intel_crtc_state *pipe_config)
{
	if (pipe_config->pipe_bpp > 24)
		return false;

	/* HSW can handle pixel rate up to cdclk? */
	if (IS_HASWELL(dev_priv->dev))
		return true;

	/*
	 * We compare against max which means we must take
	 * the increased cdclk requirement into account when
	 * calculating the new cdclk.
	 *
	 * Should measure whether using a lower cdclk w/o IPS
	 */
	return ilk_pipe_pixel_rate(pipe_config) <=
		dev_priv->max_cdclk_freq * 95 / 100;
}

/* Decide whether IPS will be enabled for this pipe config. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config_supports_ips(dev_priv, pipe_config);
}

/* Whether this crtc can use double wide mode (gen2/3 only). */
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
{
	const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* GDG double wide on either pipe, otherwise pipe A only */
	return INTEL_INFO(dev_priv)->gen < 4 &&
		(crtc->pipe == PIPE_A || IS_I915G(dev_priv));
}

/*
 * Platform-independent crtc config computation: pixel clock limits
 * (incl. double wide on gen <= 3), even-width constraints, the hsync
 * front porch workaround, IPS, and FDI config for PCH pipes.
 * Returns 0, RETRY, or a negative error.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    adjusted_mode->crtc_clock > clock_limit) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		if (adjusted_mode->crtc_clock > clock_limit) {
			DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
				      adjusted_mode->crtc_clock, clock_limit,
				      yesno(pipe_config->double_wide));
			return -EINVAL;
		}
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/*
 * Read the current CDCLK (kHz) on Skylake from LCPLL1/CDCLK_CTL,
 * accounting for the two possible DPLL0 VCOs (8640 vs 8100).
 */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE))
		return 24000; /* 24MHz is the cd freq with NSSC ref */

	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}

/* Read the current CDCLK (kHz) on Broxton from the DE PLL ratio and
 * CD2X divider. */
static int broxton_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
	uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
	int cdclk;

	/* PLL off: running on the 19.2 MHz bypass clock. */
	if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
		return 19200;

	cdclk = 19200 * pll_ratio / 2;

	switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
	case BXT_CDCLK_CD2X_DIV_SEL_1:
		return cdclk;  /* 576MHz or 624MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_1_5:
		return cdclk * 2 / 3; /* 384MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_2:
		return cdclk / 2; /* 288MHz */
	case BXT_CDCLK_CD2X_DIV_SEL_4:
		return cdclk / 4; /* 144MHz */
	}

	/* error case, do as if DE PLL isn't enabled */
	return 19200;
}

/* Read the current CDCLK (kHz) on Broadwell from LCPLL/FUSE_STRAP. */
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}

/* Read the current CDCLK (kHz) on Haswell from LCPLL/FUSE_STRAP. */
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev))
		return 337500;
	else
		return 540000;
}

/* VLV/CHV: CDCLK comes from the CCK display clock control. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
				      CCK_DISPLAY_CLOCK_CONTROL);
}

/* Fixed-frequency platforms below; values in kHz. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}

static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Pineview: decode the display clock from the GCFGC config word. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - treat unknown encodings as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}

/* i915GM: decode the display clock from the GCFGC config word. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133333;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333333;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}

/* i85x: derive the display clock from the HPLL clock control word. */
static int i85x_get_display_clock_speed(struct drm_device *dev)
{
	u16 hpllcc = 0;

	/*
	 * 852GM/852GMV only supports 133 MHz and the HPLLCC
	 * encoding is different :(
	 * FIXME is this the right way to detect 852GM/852GMV?
	 */
	if (dev->pdev->revision == 0x1)
		return 133333;

#if 0
	pci_bus_read_config_word(dev->pdev->bus,
				 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
#endif
	/*
	 * NOTE(review): with the config read compiled out above, hpllcc
	 * is always 0 here, so the switch always takes whichever case
	 * matches 0 — confirm this is intended on this port.
	 */

	/* Assume that the hardware is in the high speed state. This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_133_200_2:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	case GC_CLOCK_133_266:
	case GC_CLOCK_133_266_2:
	case GC_CLOCK_166_266:
		return 266667;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}

/*
 * Read the HPLL VCO frequency (kHz) from the HPLLVCO register, using a
 * per-chipset lookup table indexed by the low 3 bits. Returns 0 for
 * unknown chipsets or an invalid table entry.
 */
static unsigned int intel_hpll_vco(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	static const unsigned int blb_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 6400000,
	};
	static const unsigned int pnv_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
		[4] = 2666667,
	};
	static const unsigned int
	cl_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 3333333,
		[5] = 3566667,
		[6] = 4266667,
	};
	static const unsigned int elk_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 4800000,
	};
	static const unsigned int ctg_vco[8] = {
		[0] = 3200000,
		[1] = 4000000,
		[2] = 5333333,
		[3] = 6400000,
		[4] = 2666667,
		[5] = 4266667,
	};
	const unsigned int *vco_table;
	unsigned int vco;
	uint8_t tmp = 0;

	/* FIXME other chipsets? */
	if (IS_GM45(dev))
		vco_table = ctg_vco;
	else if (IS_G4X(dev))
		vco_table = elk_vco;
	else if (IS_CRESTLINE(dev))
		vco_table = cl_vco;
	else if (IS_PINEVIEW(dev))
		vco_table = pnv_vco;
	else if (IS_G33(dev))
		vco_table = blb_vco;
	else
		return 0;

	tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);

	vco = vco_table[tmp & 0x7];
	if (vco == 0)
		DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
	else
		DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);

	return vco;
}

/* GM45: CDCLK = HPLL VCO divided per the GCFGC cdclk select bit. */
static int gm45_get_display_clock_speed(struct drm_device *dev)
{
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 12) & 0x1;

	switch (vco) {
	case 2666667:
	case 4000000:
	case 5333333:
		return cdclk_sel ? 333333 : 222222;
	case 3200000:
		return cdclk_sel ? 320000 : 228571;
	default:
		DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
		return 222222;
	}
}

/* i965GM: CDCLK = HPLL VCO / divider looked up from GCFGC. */
static int i965gm_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 16, 10, 8 };
	static const uint8_t div_4000[] = { 20, 12, 10 };
	static const uint8_t div_5333[] = { 24, 16, 14 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = ((tmp >> 8) & 0x1f) - 1;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
	return 200000;
}

/* G33: CDCLK = HPLL VCO / divider looked up from GCFGC. */
static int g33_get_display_clock_speed(struct drm_device *dev)
{
	static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
	static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
	static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
	static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
	const uint8_t *div_table;
	unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
	uint16_t tmp = 0;

	pci_read_config_word(dev->pdev, GCFGC, &tmp);

	cdclk_sel = (tmp >> 4) & 0x7;

	if (cdclk_sel >= ARRAY_SIZE(div_3200))
		goto fail;

	switch (vco) {
	case 3200000:
		div_table = div_3200;
		break;
	case 4000000:
		div_table = div_4000;
		break;
	case 4800000:
		div_table = div_4800;
		break;
	case 5333333:
		div_table = div_5333;
		break;
	default:
		goto fail;
	}

	return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);

fail:
	DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
	return 190476;
}

/*
 * Halve both values until each fits in the M/N register field,
 * preserving the ratio (approximately).
 */
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/*
 * Compute an m/n register pair approximating the ratio m:n, with n
 * scaled up to the largest legal power of two for precision.
 */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/*
 * Fill in the GMCH and link data m/n values for a display link
 * (DP or FDI) given the pixel format, lane count and clocks (kHz).
 */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	/* Data m/n: payload bits vs total link bits per unit time. */
	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	/* Link m/n: pixel clock vs link symbol clock. */
	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}

/* Whether to use spread spectrum clocking for LVDS (module param
 * override, else VBT, unless quirked off). */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/* Pick the DPLL reference clock (kHz) for a GMCH-platform pipe. */
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	WARN_ON(!crtc_state->base.state);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) {
		refclk = 100000;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		   intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		/* Non-cloned LVDS with SSC: use the VBT-provided SSC clock. */
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

/* Pack the FP divider register value in the Pineview layout (N is a
 * one-hot bit field, M1 is unused on PNV). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

/* Pack the FP divider register value in the classic i9xx layout:
 * N in bits 23:16, M1 in 15:8, M2 in 7:0. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

/*
 * Compute the FP0/FP1 divider register values for the crtc state.
 * FP1 only differs from FP0 when a reduced (downclocked) LVDS clock is
 * available, in which case lowfreq_avail is flagged for CxSR use.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	/* Pineview packs the dividers differently from the other i9xx parts */
	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
			    pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): the plain '=' below discards the masked read-back
	 * value from the two lines above; if an RMW was intended this
	 * should be '|='. Matches the sequence as found — confirm against
	 * the VLV DPIO programming notes before changing. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the PCH transcoder data/link M1/N1 registers for @crtc. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/*
 * Program the CPU transcoder data/link M/N registers.  On gen5+ the
 * per-transcoder registers are used (optionally also M2/N2 for DRRS);
 * older parts use the per-pipe G4X registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
		    crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/*
 * Program DP M/N values: M1_N1 selects the primary divider (with M2_N2
 * as the DRRS alternate), M2_N2 programs the alternate divider into
 * M1_N1 on hardware without separate M2_N2 registers.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}

/* Compute the VLV DPLL and DPLL_MD register values into @pipe_config. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}

/*
 * Write the precomputed divider values for @crtc's PLL into the VLV
 * DPIO (sideband) registers.  The write sequence follows the eDP/HDMI
 * DPIO vbios notes and is order-sensitive; sb_lock serializes sideband
 * access.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Calibration is enabled with a second write of the same register */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}

/* Compute the CHV DPLL and DPLL_MD register values into @pipe_config. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	/* CRI clock must stay on for pipes B/C (VGA hotplug detection) */
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |=
			DPLL_INTEGRATED_CRI_CLK_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

/*
 * Write the precomputed divider values for @crtc's PLL into the CHV
 * DPIO (sideband) registers.  M2 carries a 22-bit fractional part;
 * the loop filter coefficients are selected from the VCO frequency.
 * Order-sensitive register sequence; sb_lock serializes sideband access.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* split m2 into integer (>>22) and fractional (low 22 bits) parts */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* coarse threshold only when the fractional divider is unused */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	/* minimal throwaway state; only the dividers matter here */
	struct intel_crtc_state pipe_config = {
		.base.crtc = &crtc->base,
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_compute_dpll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe.
 * To be used in cases where the PLL was
 * previously force-enabled with vlv_force_pll_on().
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/*
 * Compute the DPLL (and on gen4+ DPLL_MD) register values for the
 * gen3+ (non-VLV/CHV) parts and stash them in crtc_state->dpll_hw_state.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock select: TV clock, SSC for sole LVDS, else DREFCLK */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

/*
 * Compute the DPLL register value for gen2 parts, which use a different
 * P1/P2 encoding and support DVO 2x mode instead of SDVO high speed.
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(struct
				   intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* horizontal timings: registers hold (value - 1), end in high word */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

/*
 * Read the pipe timing registers back into @pipe_config, undoing the
 * (value - 1) encoding used by the hardware.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	/* undo the halfline adjustment applied when programming interlaced */
	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

/*
 * Convert a crtc state's adjusted mode back into a drm_display_mode,
 * recomputing the derived hsync/vrefresh fields and the mode name.
 */
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	/* NOTE(review): redundant — flags were already assigned above;
	 * the OR is a no-op kept for fidelity. */
	mode->flags |= pipe_config->base.adjusted_mode.flags;

	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}

/*
 * Program PIPECONF for @intel_crtc from its config: enable/double-wide,
 * bpc and dithering (g4x+), interlace mode, CxSR downclocking and
 * limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* quirked pipes must keep their enable bit set across reprogramming */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/*
 * Compute the clock / DPLL state for a gen2..gen4/VLV/CHV crtc: pick a
 * reference clock, find suitable dividers, then build the DPLL register
 * values via the platform-specific compute helper.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* DSI pipes don't use the DPLL at all */
	if (crtc_state->has_dsi_encoder)
		return 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == &crtc->base)
			num_connectors++;
	}

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE.
		 * The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* build the DPLL register values with the platform-specific helper */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}

/*
 * Read back the GMCH panel fitter state into @pipe_config, but only
 * when the fitter exists and is attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (INTEL_INFO(dev)->gen < 4) {
		/* pre-gen4 the fitter is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

/*
 * Read the VLV PLL dividers back from DPIO and compute the resulting
 * port clock into @pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

/*
 * Reconstruct the framebuffer configuration the BIOS left behind on a
 * pre-SKL primary plane, so the boot image can be preserved.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
	{
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* gen4+ uses a surface base + tile/linear offset; older parts a
	 * single plane address register */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read the CHV PLL dividers back from DPIO (including the 22-bit
 * fractional M2 part) and compute the resulting port clock.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	/* integer M2 in the high bits, fraction (if enabled) in the low 22 */
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/*
 * Read back the full hardware state of a gen2..gen4/VLV/CHV pipe into
 * @pipe_config.  Returns false when the pipe's power domain is off or
 * the pipe is disabled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8214 8215 intel_get_pipe_timings(crtc, pipe_config); 8216 8217 i9xx_get_pfit_config(crtc, pipe_config); 8218 8219 if (INTEL_INFO(dev)->gen >= 4) { 8220 tmp = I915_READ(DPLL_MD(crtc->pipe)); 8221 pipe_config->pixel_multiplier = 8222 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 8223 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 8224 pipe_config->dpll_hw_state.dpll_md = tmp; 8225 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 8226 tmp = I915_READ(DPLL(crtc->pipe)); 8227 pipe_config->pixel_multiplier = 8228 ((tmp & SDVO_MULTIPLIER_MASK) 8229 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 8230 } else { 8231 /* Note that on i915G/GM the pixel multiplier is in the sdvo 8232 * port and will be fixed up in the encoder->get_config 8233 * function. */ 8234 pipe_config->pixel_multiplier = 1; 8235 } 8236 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 8237 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 8238 /* 8239 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 8240 * on 830. Filter it out here so that we don't 8241 * report errors due to that. 8242 */ 8243 if (IS_I830(dev)) 8244 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 8245 8246 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 8247 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 8248 } else { 8249 /* Mask out read-only status bits. */ 8250 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 8251 DPLL_PORTC_READY_MASK | 8252 DPLL_PORTB_READY_MASK); 8253 } 8254 8255 if (IS_CHERRYVIEW(dev)) 8256 chv_crtc_clock_get(crtc, pipe_config); 8257 else if (IS_VALLEYVIEW(dev)) 8258 vlv_crtc_clock_get(crtc, pipe_config); 8259 else 8260 i9xx_crtc_clock_get(crtc, pipe_config); 8261 8262 /* 8263 * Normally the dotclock is filled in by the encoder .get_config() 8264 * but in case the pipe is enabled w/o any ports we need a sane 8265 * default. 
8266 */ 8267 pipe_config->base.adjusted_mode.crtc_clock = 8268 pipe_config->port_clock / pipe_config->pixel_multiplier; 8269 8270 ret = true; 8271 8272 out: 8273 intel_display_power_put(dev_priv, power_domain); 8274 8275 return ret; 8276 } 8277 8278 static void ironlake_init_pch_refclk(struct drm_device *dev) 8279 { 8280 struct drm_i915_private *dev_priv = dev->dev_private; 8281 struct intel_encoder *encoder; 8282 u32 val, final; 8283 bool has_lvds = false; 8284 bool has_cpu_edp = false; 8285 bool has_panel = false; 8286 bool has_ck505 = false; 8287 bool can_ssc = false; 8288 8289 /* We need to take the global config into account */ 8290 for_each_intel_encoder(dev, encoder) { 8291 switch (encoder->type) { 8292 case INTEL_OUTPUT_LVDS: 8293 has_panel = true; 8294 has_lvds = true; 8295 break; 8296 case INTEL_OUTPUT_EDP: 8297 has_panel = true; 8298 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 8299 has_cpu_edp = true; 8300 break; 8301 default: 8302 break; 8303 } 8304 } 8305 8306 if (HAS_PCH_IBX(dev)) { 8307 has_ck505 = dev_priv->vbt.display_clock_mode; 8308 can_ssc = has_ck505; 8309 } else { 8310 has_ck505 = false; 8311 can_ssc = true; 8312 } 8313 8314 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n", 8315 has_panel, has_lvds, has_ck505); 8316 8317 /* Ironlake: try to setup display ref clock before DPLL 8318 * enabling. This is only under driver's control after 8319 * PCH B stepping, previous chipset stepping should be 8320 * ignoring this setting. 8321 */ 8322 val = I915_READ(PCH_DREF_CONTROL); 8323 8324 /* As we must carefully and slowly disable/enable each source in turn, 8325 * compute the final state we want first and check if we need to 8326 * make any changes at all. 
8327 */ 8328 final = val; 8329 final &= ~DREF_NONSPREAD_SOURCE_MASK; 8330 if (has_ck505) 8331 final |= DREF_NONSPREAD_CK505_ENABLE; 8332 else 8333 final |= DREF_NONSPREAD_SOURCE_ENABLE; 8334 8335 final &= ~DREF_SSC_SOURCE_MASK; 8336 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8337 final &= ~DREF_SSC1_ENABLE; 8338 8339 if (has_panel) { 8340 final |= DREF_SSC_SOURCE_ENABLE; 8341 8342 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8343 final |= DREF_SSC1_ENABLE; 8344 8345 if (has_cpu_edp) { 8346 if (intel_panel_use_ssc(dev_priv) && can_ssc) 8347 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8348 else 8349 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8350 } else 8351 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8352 } else { 8353 final |= DREF_SSC_SOURCE_DISABLE; 8354 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8355 } 8356 8357 if (final == val) 8358 return; 8359 8360 /* Always enable nonspread source */ 8361 val &= ~DREF_NONSPREAD_SOURCE_MASK; 8362 8363 if (has_ck505) 8364 val |= DREF_NONSPREAD_CK505_ENABLE; 8365 else 8366 val |= DREF_NONSPREAD_SOURCE_ENABLE; 8367 8368 if (has_panel) { 8369 val &= ~DREF_SSC_SOURCE_MASK; 8370 val |= DREF_SSC_SOURCE_ENABLE; 8371 8372 /* SSC must be turned on before enabling the CPU output */ 8373 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8374 DRM_DEBUG_KMS("Using SSC on panel\n"); 8375 val |= DREF_SSC1_ENABLE; 8376 } else 8377 val &= ~DREF_SSC1_ENABLE; 8378 8379 /* Get SSC going before enabling the outputs */ 8380 I915_WRITE(PCH_DREF_CONTROL, val); 8381 POSTING_READ(PCH_DREF_CONTROL); 8382 udelay(200); 8383 8384 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 8385 8386 /* Enable CPU source on CPU attached eDP */ 8387 if (has_cpu_edp) { 8388 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 8389 DRM_DEBUG_KMS("Using SSC on eDP\n"); 8390 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 8391 } else 8392 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 8393 } else 8394 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 8395 8396 I915_WRITE(PCH_DREF_CONTROL, val); 8397 
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The stepwise updates above must have converged on 'final'. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY IOSF sideband reset: assert, wait for the status bit,
 * de-assert, wait for it to clear. Errors are logged but not propagated.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw
 *
 * NOTE(review): the SBI offsets and values below are opaque magic from the
 * workaround definition; each register is programmed pairwise for the two
 * FDI lanes (0x2xxx / 0x21xx pairs). Do not reorder without consulting BSpec.
 */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize mutually-dependent arguments before touching hardware. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Route through the path-alt clock before disabling SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SSCDIVINTPHASE values indexed by bend step (see lpt_bend_clkout_dp). */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd 5-step increments need the dither phase pattern. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * LPT reference clock setup: with a VGA output CLKOUT_DP must be running
 * (unbent, with spread, FDI configured); otherwise it can be disabled.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga) {
		lpt_bend_clkout_dp(to_i915(dev), 0);
		lpt_enable_clkout_dp(dev, true, true);
	} else {
		lpt_disable_clkout_dp(dev);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Pick the reference clock (kHz) for an Ironlake PCH PLL: the VBT SSC
 * frequency when a lone LVDS panel uses SSC, otherwise the fixed 120 MHz
 * PCH reference.
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/*
 * Program PIPECONF for an Ironlake-style pipe from the staged crtc config:
 * bpc, dithering, interlace mode and color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/*
 * Program PIPECONF/PIPEMISC and the gamma mode for HSW+ transcoders.
 * On BDW/gen9+ bpc and dithering live in PIPEMISC rather than PIPECONF.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

/*
 * Find DPLL dividers for the target port clock on Ironlake. Fills *clock
 * and returns true on success. *has_reduced_clock / *reduced_clock are
 * declared but not computed here (no downclocking in this path).
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;
	const intel_limit_t *limit;
	bool ret;

	refclk = ironlake_get_refclk(crtc_state);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc_state, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc_state,
					  crtc_state->port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	return true;
}

/* Minimum number of FDI lanes needed for the given dot clock and bpp. */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True when the effective M divider is small enough to need CB tuning. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the DPLL control register value for an Ironlake PCH PLL from the
 * staged divider state, and set FP_CB_TUNE in *fp/*fp2 when needed.
 * fp2/reduced_clock describe the optional reduced (downclocked) state.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* SDVO/HDMI and DP both require the high-speed clock. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Compute the clock/PLL state for an Ironlake crtc: find dividers, build
 * the DPLL/FP values and claim a shared PCH PLL when a PCH encoder is used.
 * Returns 0 on success or -EINVAL if no PLL settings could be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);
	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}

/* Read back the PCH transcoder link M/N values for state readout. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* TU size shares the DATA_M register with the gmch M value. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back the CPU transcoder link M/N values (and, when requested and
 * available, the second M2/N2 set used for DRRS) for state readout.
 * Pre-gen5 uses the G4X-style per-pipe registers instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/* Read back DP link M/N state from whichever transcoder drives the port. */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI link M/N state (no second M2/N2 set for FDI). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Readout of the SKL pipe scaler ("pfit") state: find the first enabled
 * scaler bound to the pipe (not to a plane) and record its id, position
 * and size; update the scaler_users bitmask accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

/*
 * Reconstruct the firmware-programmed (BIOS) primary plane configuration
 * on SKL+ so the framebuffer it scans out can be inherited. On failure,
 * or when the plane is disabled, plane_config is left unpopulated.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface base is 4k aligned; low bits are flags/reserved. */
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE packs (height - 1) << 16 | (width - 1). */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on format/tiling. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(fb);
}

/*
 * Readout of the ILK/IVB/HSW panel fitter state into pipe_config.
 * On gen7 also sanity-check that the fitter is attached to our pipe.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
		 */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Reconstruct the BIOS-programmed primary plane configuration on
 * ILK/SNB/IVB/HSW/BDW for framebuffer inheritance; the Ironlake
 * counterpart of skylake_get_initial_plane_config() above.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb:
size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 9367 pipe_name(pipe), fb->width, fb->height, 9368 fb->bits_per_pixel, base, fb->pitches[0], 9369 plane_config->size); 9370 9371 plane_config->fb = intel_fb; 9372 } 9373 9374 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 9375 struct intel_crtc_state *pipe_config) 9376 { 9377 struct drm_device *dev = crtc->base.dev; 9378 struct drm_i915_private *dev_priv = dev->dev_private; 9379 enum intel_display_power_domain power_domain; 9380 uint32_t tmp; 9381 bool ret; 9382 9383 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9384 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9385 return false; 9386 9387 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9388 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9389 9390 ret = false; 9391 tmp = I915_READ(PIPECONF(crtc->pipe)); 9392 if (!(tmp & PIPECONF_ENABLE)) 9393 goto out; 9394 9395 switch (tmp & PIPECONF_BPC_MASK) { 9396 case PIPECONF_6BPC: 9397 pipe_config->pipe_bpp = 18; 9398 break; 9399 case PIPECONF_8BPC: 9400 pipe_config->pipe_bpp = 24; 9401 break; 9402 case PIPECONF_10BPC: 9403 pipe_config->pipe_bpp = 30; 9404 break; 9405 case PIPECONF_12BPC: 9406 pipe_config->pipe_bpp = 36; 9407 break; 9408 default: 9409 break; 9410 } 9411 9412 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 9413 pipe_config->limited_color_range = true; 9414 9415 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 9416 struct intel_shared_dpll *pll; 9417 9418 pipe_config->has_pch_encoder = true; 9419 9420 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 9421 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9422 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9423 9424 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9425 9426 if (HAS_PCH_IBX(dev_priv->dev)) { 9427 pipe_config->shared_dpll = 9428 (enum intel_dpll_id) crtc->pipe; 9429 } else { 9430 tmp = I915_READ(PCH_DPLL_SEL); 9431 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 9432 pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B; 9433 else 9434 
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		/* multiplier is stored as (value - 1) in the DPLL register */
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Sanity-check that everything that must be off before disabling the
 * LCPLL really is off: all pipes, the power well, the various PLLs,
 * panel power, backlight PWMs, the utility pin and the PCH GTC.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* Read the D_COMP register; its location differs between HSW and BDW. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/*
 * Write the D_COMP register.  On HSW this must go through the pcode
 * mailbox (under the rps hw_lock); on BDW it is a plain MMIO write.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up: locked, enabled, on LCPLL, power-down denied */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv->dev);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state.
 * We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, PCH refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

/* Apply the cdclk chosen during the atomic check phase (BXT). */
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;

	broxton_set_cdclk(dev, req_cdclk);
}

/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixel_rate = 0;

	for_each_intel_crtc(state->dev, intel_crtc) {
		int pixel_rate;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(state->dev) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		max_pixel_rate = max(max_pixel_rate, pixel_rate);
	}

	return max_pixel_rate;
}

/*
 * Reprogram the BDW cdclk: notify pcode, temporarily switch the CD clock
 * to Fclk, select the new LCPLL CD frequency, switch back, and tell pcode
 * the new frequency index.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* data is the pcode frequency index for the chosen cdclk */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}

/* Pick the smallest legal BDW cdclk for the configuration's pixel rate. */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	if (max_pixclk > 540000)
		cdclk = 675000;
	else if (max_pixclk > 450000)
		cdclk = 540000;
	else if (max_pixclk > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	to_intel_atomic_state(state)->cdclk = cdclk;

	return 0;
}

/* Apply the cdclk chosen during the atomic check phase (BDW). */
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	unsigned int req_cdclk = to_intel_atomic_state(old_state)->cdclk;

	broadwell_set_cdclk(dev, req_cdclk);
}

/* HSW+ clock computation is handled entirely by the DDI PLL selection. */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}

/* Derive the DPLL feeding @port on BXT (fixed port -> PLL mapping). */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum
			    port port,
			    struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}

/* Read back which DPLL feeds @port on SKL/KBL from DPLL_CTRL2. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

/* Read back which PLL feeds @port on HSW/BDW from PORT_CLK_SEL. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		pipe_config->shared_dpll = DPLL_ID_SPLL;
		break;
	}
}

/*
 * Read back which DDI port (and thus which PLL) drives @crtc, read the
 * PLL hw state, and detect whether the pipe is routed through the PCH
 * (FDI on port E).
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read the full pipe configuration for a HSW+ pipe back from the
 * hardware.  Handles the eDP transcoder specially, and takes a power
 * domain reference for every domain touched (pipe, transcoder, panel
 * fitter), releasing them all before returning.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	unsigned long power_domain_mask;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	power_domain_mask = BIT(power_domain);

	ret = false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through and assume pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		goto out;
	power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 9) {
		skl_init_scalers(dev, crtc, pipe_config);
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		pipe_config->scaler_state.scaler_id = -1;
		pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}

	power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		power_domain_mask |= BIT(power_domain);
		if (INTEL_INFO(dev)->gen >= 9)
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		/* PIPE_MULT stores (multiplier - 1) */
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	ret = true;

out:
	for_each_power_domain(power_domain, power_domain_mask)
		intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Program the 845G/865G cursor.  These chips have a single (pipe A)
 * cursor whose base/size/stride may only change while it is disabled,
 * hence the disable-before-update dance below.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (on) {
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}

/*
 * Program the gen4+ style per-pipe cursor.  Only 64/128/256 square ARGB
 * cursors are supported; the base write latches the update on the next
 * vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (on) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (intel_crtc->base.cursor->state->crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}

/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	struct drm_plane_state *cursor_state = crtc->cursor->state;
	int x = cursor_state->crtc_x;
	int y = cursor_state->crtc_y;
	u32 base = 0, pos = 0;

	base = intel_crtc->cursor_addr;

	/* force the cursor off when it is entirely outside the pipe */
	if (x >= intel_crtc->config->pipe_src_w)
		on = false;

	if (y >= intel_crtc->config->pipe_src_h)
		on = false;

	if (x < 0) {
		if (x + cursor_state->crtc_w <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + cursor_state->crtc_h <= 0)
			on = false;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		/* point base at the last pixel for 180 degree rotation */
		base += (cursor_state->crtc_h *
			 cursor_state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, on);
	else
		i9xx_update_cursor(crtc, base, on);
}
/*
 * cursor_size_ok - validate cursor plane dimensions for this device.
 *
 * 845g/865g only constrain the width (multiple of 64, capped per chip);
 * every other platform requires square cursors of a few power-of-two sizes.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width == height here, so width|height is the common size */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through - 256/128 are valid on gen3+ */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/*
 * Store the top 8 bits of each 16-bit gamma component in the crtc's LUT
 * shadow copy and program the hardware LUT from it.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	/* clamp to the 256-entry LUT */
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate an intel_framebuffer wrapping @obj.  This is the unlocked
 * variant; intel_framebuffer_create() below wraps it with struct_mutex.
 * Returns ERR_PTR() on failure, never NULL.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}

/* Locked wrapper around __intel_framebuffer_create(). */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

/* Bytes per scanline for @width pixels at @bpp, aligned to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to back @mode at @bpp. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/*
 * Allocate a GEM object large enough for @mode and wrap it in a
 * framebuffer.  On fb creation failure the object is released here;
 * on success ownership moves to the framebuffer.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}

/*
 * Return the fbdev framebuffer if it is large enough (pitch and size)
 * to display @mode, otherwise NULL.  Always NULL without fbdev emulation.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}

/*
 * Fill in the primary plane state for @crtc in @state: attach @fb at
 * (x, y) scanning out @mode, or detach the plane when @fb is NULL.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int
hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	/* zero w/h (no mode) effectively disables the plane */
	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* src coordinates are 16.16 fixed point */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

/*
 * Light up a pipe on @connector so load detection can be performed.
 * Reuses the already-bound crtc when there is one, otherwise grabs an
 * idle crtc and commits a minimal atomic state (load_detect_mode unless
 * the caller supplied one).  State to undo the operation is saved in
 * @old for intel_release_load_detect_pipe().  Returns true on success.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail;

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->state->enable)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	intel_crtc = to_intel_crtc(crtc);
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return false;

	state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	connector_state->crtc = crtc;
	connector_state->best_encoder = &intel_encoder->base;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		/* may be an ERR_PTR; checked just below */
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	drm_mode_copy(&crtc_state->base.mode, mode);

	if (drm_atomic_commit(state)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	state = NULL;

	/* lock contention: drop everything and try again */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/*
 * Undo intel_get_load_detect_pipe(): shut down the temporarily enabled
 * pipe and release the temporary framebuffer, or just restore the old
 * dpms mode when an already-running crtc was borrowed.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		state = drm_atomic_state_alloc(dev);
		if (!state)
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(crtc_state))
			goto fail;

		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		crtc_state->base.enable = crtc_state->base.active = false;

		/* detach the primary plane as well */
		ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
						      0, 0);
		if (ret)
			goto fail;

		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	drm_atomic_state_free(state);
}

/*
 * Reference clock (in kHz) feeding the DPLL, derived from the
 * programmed DPLL value and platform generation.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* pick the active FP divisor register */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is stored one-hot; ffs() recovers the divisor */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* i830 has no LVDS register */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

/*
 * Derive the pixel clock (kHz) from a link clock and an m/n divider
 * pair, keeping full precision via 64-bit math.  Returns 0 when the
 * divider is unprogrammed (link_n == 0).
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

/*
 * PCH (ironlake) variant: read port_clock out of the DPLL, then derive
 * crtc_clock from the FDI link m/n values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	/* timing registers hold value-minus-one */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}

/* Mark the GPU busy: grab a runtime-pm ref and kick RPS on gen6+. */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}

/* Counterpart of intel_mark_busy(): drop RPS and the runtime-pm ref. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	intel_runtime_pm_put(dev_priv);
}

/*
 * Tear down a crtc: cancel any pending unpin work (detached under the
 * event lock first) before freeing the crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if
 (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop
 * the flip request and object references, and signal frontbuffer
 * tracking.  Runs from the workqueue, so it may take struct_mutex.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/*
 * Complete the pending page flip on @crtc, if its work item has reached
 * INTEL_FLIP_COMPLETE.  Safe to call from irq and reset paths.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/* Flip-done handler keyed by pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Flip-done handler keyed by plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	/* wrap-safe unsigned comparison */
	return !((a - b) & 0x80000000);
}

/*
 * Decide whether the pending flip has actually reached the hardware,
 * combining the live surface address with the flip counter (see below).
 * Always true during GPU reset or on platforms lacking the registers.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers doen't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}

/*
 * Flip-done irq entry point: advance the pending work item to
 * INTEL_FLIP_COMPLETE once the flip is confirmed on the hardware.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire.
 */
	smp_wmb();
}

/*
 * Queue a CS-based page flip on gen2: wait for any previous flip on
 * the plane, then emit MI_DISPLAY_FLIP with pitch and base address.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* gen3 variant: same as gen2 but uses the i915 flip opcode. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* gen4/5 variant: base address carries the tiling mode bit. */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* gen6 variant: tiling mode rides in the pitch dword instead. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/*
 * gen7+ variant: plane selected via IVB-specific bits, and on the
 * render ring the DERRMR register is unmasked around the flip (with an
 * extra SRM dword on gen8 for 48-bit addressing).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/*
 * Decide whether to perform the flip via MMIO rather than a CS command,
 * based on module parameters, execlists and the object's last writer.
 */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
#if 0
	else if (obj->base.dma_buf &&
		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
						       false))
		return true;
#endif
	else
		/* flip on the CS only if it was the last writer */
		return ring != i915_gem_request_get_ring(obj->last_write_req);
}

/*
 * Perform an MMIO flip on skl+: reprogram PLANE_CTL tiling and
 * PLANE_STRIDE, then latch everything with the PLANE_SURF write.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum i915_pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/*
 * Perform an MMIO flip on ilk..bdw: refresh the tiling bit in DSPCNTR,
 * then latch the new surface address via DSPSURF.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	intel_mark_page_flip_active(work);

	/* evade the vblank so CTL/STRIDE/SURF update atomically */
	intel_pipe_update_start(crtc);

	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
	else
		/* use_mmio_flip() retricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);
}

/*
 * Workqueue half of an MMIO flip: wait for the last render request (and,
 * when enabled, any dma-buf fence) before touching the plane registers.
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);
#if 0
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
#endif

	if (mmio_flip->req) {
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		i915_gem_request_unreference__unlocked(mmio_flip->req);
	}

	/* For framebuffer backed by dmabuf, wait for fence */
#if 0
	if (obj->base.dma_buf)
		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							    false, false,
							    MAX_SCHEDULE_TIMEOUT) < 0);
#endif

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}

/*
 * Allocate and queue an intel_mmio_flip work item for @obj on @crtc.
 * NOTE(review): definition continues past this chunk; body reproduced
 * verbatim up to the visible truncation point.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_i915_gem_object *obj)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);
	mmio_flip->rotation = crtc->primary->state->rotation;

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}

/*
 * Fallback display.queue_flip hook for platforms without a CS-flip
 * implementation; always fails with -ENODEV so callers fall back.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}

/*
 * Heuristically decide whether the pending page flip has stalled (e.g. a
 * missed flip-done interrupt).  Returns true when the flip should be
 * treated as complete.  Caller must hold dev->event_lock (unpin_work is
 * read here).
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (atomic_read(&work->pending) < INTEL_FLIP_PENDING)
		return false;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	/* Give the flip at least 3 vblanks before declaring a stall. */
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

/*
 * Vblank-time check for a stuck page flip on @pipe: complete it if the
 * hardware already flipped behind our back, and RPS-boost the queued
 * request if the flip is taking more than one vblank.  Uses DragonFly
 * lockmgr on dev->event_lock (the port's equivalent of the Linux
 * spin_lock variant here).
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

//	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * Legacy page-flip entry point (drm_crtc_funcs.page_flip).  Validates the
 * new fb against the current one, queues an intel_unpin_work, then flips
 * either via MMIO (intel_queue_mmio_flip) or via a ring command
 * (display.queue_flip).  On -EIO (terminally wedged GPU) the flip is
 * emulated with a full atomic commit so userspace still gets its event.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: don't let more than two unpin works pile up. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work.
	 */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine used to emit the CS flip (NULL forces MMIO). */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, ring, &request);
		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, obj);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			ret = i915_gem_request_alloc(ring, ring->default_context, &request);
			if (ret)
				goto cleanup_unpin;
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_fbc_deactivate(intel_crtc);
	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

/* Error unwind: undo in the reverse order of the setup above. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (request)
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		/* GPU is wedged: emulate the flip with an atomic commit. */
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}


/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes.
	 */
	if (new->visible != cur->visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
		return true;

	return false;
}

/*
 * True when the plane state scales: source size (16.16 fixed point)
 * differs from destination size (integer pixels).
 */
static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->src) >> 16;
	int src_h = drm_rect_height(&state->src) >> 16;
	int dst_w = drm_rect_width(&state->dst);
	int dst_h = drm_rect_height(&state->dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Compute the derived per-plane change flags (turn on/off, watermark and
 * cxsr updates, FBC/IPS disables, vblank waits) for an atomic plane state
 * update and record them in the crtc/pipe state.  Returns 0 or a negative
 * error from skl_update_scaler_plane().
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	int i = drm_plane_index(plane);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane can't be visible while its crtc is disabled. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	if (!is_crtc_enabled && WARN_ON(visible))
		visible = false;

	if (!was_visible && !visible)
		return 0;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on || turn_off) {
		pipe_config->wm_changed = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
			if (is_crtc_enabled)
				intel_crtc->atomic.wait_vblank = true;
			pipe_config->disable_cxsr = true;
		}
	} else if (intel_wm_need_update(plane, plane_state)) {
		pipe_config->wm_changed = true;
	}

	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.pre_disable_primary = turn_off;
		intel_crtc->atomic.post_enable_primary = turn_on;

		if (turn_off) {
			/*
			 * FIXME: Actually if we will still have any other
			 * plane enabled on the pipe we could let IPS enabled
			 * still, but for now lets consider that when we make
			 * primary invisible by setting DSPCNTR to 0 on
			 * update_primary_plane function IPS needs to be
			 * disable.
			 */
			intel_crtc->atomic.disable_ips = true;

			intel_crtc->atomic.disable_fbc = true;
		}

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */

		if (visible &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.crtc == intel_crtc &&
		    plane_state->rotation != BIT(DRM_ROTATE_0))
			intel_crtc->atomic.disable_fbc = true;

		/*
		 * BDW signals flip done immediately if the plane
		 * is disabled, even if the plane enable is already
		 * armed to occur at the next vblank :(
		 */
		if (turn_on && IS_BROADWELL(dev))
			intel_crtc->atomic.wait_vblank = true;

		intel_crtc->atomic.update_fbc |= visible || mode_changed;
		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		/*
		 * WaCxSRDisabledForSpriteScaling:ivb
		 *
		 * cstate->update_wm was already set above, so this flag will
		 * take effect when we commit and program watermarks.
		 */
		if (IS_IVYBRIDGE(dev) &&
		    needs_scaling(to_intel_plane_state(plane_state)) &&
		    !needs_scaling(old_plane_state)) {
			to_intel_crtc_state(crtc_state)->disable_lp_wm = true;
		} else if (turn_off && !mode_changed) {
			intel_crtc->atomic.wait_vblank = true;
			intel_crtc->atomic.update_sprite_watermarks |=
				1 << i;
		}

		break;
	}
	return 0;
}

/*
 * Two encoders may share a pipe only if each one's cloneable mask
 * permits the other's type (or they are the same encoder).
 */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Check that @encoder can be cloned with every other encoder being
 * driven by @crtc in the new atomic @state.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * Validate the whole cloning configuration on @crtc: every pair of
 * encoders assigned to it must be mutually cloneable.
 */
static bool check_encoder_cloning(struct drm_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (!check_single_encoder_cloning(state, crtc, encoder))
			return false;
	}

	return true;
}

/*
 * drm_crtc_helper_funcs.atomic_check hook: validate encoder cloning,
 * compute clocks and watermarks, and set up SKL+ scalers for the new
 * crtc state.
 */
static int intel_crtc_atomic_check(struct
drm_crtc *crtc, 12024 struct drm_crtc_state *crtc_state) 12025 { 12026 struct drm_device *dev = crtc->dev; 12027 struct drm_i915_private *dev_priv = dev->dev_private; 12028 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12029 struct intel_crtc_state *pipe_config = 12030 to_intel_crtc_state(crtc_state); 12031 struct drm_atomic_state *state = crtc_state->state; 12032 int ret; 12033 bool mode_changed = needs_modeset(crtc_state); 12034 12035 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) { 12036 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 12037 return -EINVAL; 12038 } 12039 12040 if (mode_changed && !crtc_state->active) 12041 pipe_config->wm_changed = true; 12042 12043 if (mode_changed && crtc_state->enable && 12044 dev_priv->display.crtc_compute_clock && 12045 !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) { 12046 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 12047 pipe_config); 12048 if (ret) 12049 return ret; 12050 } 12051 12052 ret = 0; 12053 if (dev_priv->display.compute_pipe_wm) { 12054 ret = dev_priv->display.compute_pipe_wm(intel_crtc, state); 12055 if (ret) 12056 return ret; 12057 } 12058 12059 if (INTEL_INFO(dev)->gen >= 9) { 12060 if (mode_changed) 12061 ret = skl_update_scaler_crtc(pipe_config); 12062 12063 if (!ret) 12064 ret = intel_atomic_setup_scalers(dev, intel_crtc, 12065 pipe_config); 12066 } 12067 12068 return ret; 12069 } 12070 12071 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 12072 .mode_set_base_atomic = intel_pipe_set_base_atomic, 12073 .load_lut = intel_crtc_load_lut, 12074 .atomic_begin = intel_begin_crtc_commit, 12075 .atomic_flush = intel_finish_crtc_commit, 12076 .atomic_check = intel_crtc_atomic_check, 12077 }; 12078 12079 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 12080 { 12081 struct intel_connector *connector; 12082 12083 for_each_intel_connector(dev, connector) { 12084 if (connector->base.encoder) { 12085 
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}

/*
 * Clamp pipe_config->pipe_bpp against the sink's EDID-reported bpc, and
 * against a default limit (24, or 18 for DP/eDP) when no EDID bpc is
 * available.
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to default limit on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0) {
		int type = connector->base.connector_type;
		int clamp_bpp = 24;

		/* Fall back to 18 bpp when DP sink capability is unknown.
		 */
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
		    type == DRM_MODE_CONNECTOR_eDP)
			clamp_bpp = 18;

		if (bpp > clamp_bpp) {
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
				      bpp, clamp_bpp);
			pipe_config->pipe_bpp = clamp_bpp;
		}
	}
}

/*
 * Pick the platform's maximum pipe bpp, store it in pipe_config, then let
 * each connector on this crtc clamp it.  Returns the unclamped baseline
 * bpp (pipe_config->pipe_bpp may end up lower).
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

/* Debug helper: print the CEA-style crtc timing fields of @mode. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/*
 * Debug helper: dump an entire intel_crtc_state (link m/n values, modes,
 * pfit/scaler state, platform-specific DPLL registers) plus the state of
 * every plane on this crtc.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct
drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of link m/n values (e.g. for alternate DP rates). */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL state layout differs per platform; dump the right variant. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}

/*
 * Reject configurations that put two digital outputs on the same port:
 * returns false when any DDI port would be used by more than one
 * encoder in the new state.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: on DDI, UNKNOWN is a digital port */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fallthrough */
		default:
			break;
		}
	}

	return true;
}

/*
 * Reset @crtc_state to zero while preserving the handful of fields that
 * must survive (base drm state, scaler state, shared DPLL selection and
 * hw state, pch pfit force_thru); see the FIXME below.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	enum intel_dpll_id shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved.
	 */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof *crtc_state);

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}

/*
 * Compute a full pipe configuration for @crtc from the atomic state:
 * clears the state, sanitizes sync polarity flags, and establishes the
 * baseline pipe bpp before the encoders adjust it.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions.
Note that stereo modes can 12432 * increase the actual pipe size due to the frame doubling and 12433 * insertion of additional space for blanks between the frame. This 12434 * is stored in the crtc timings. We use the requested mode to do this 12435 * computation to clearly distinguish it from the adjusted mode, which 12436 * can be changed by the connectors in the below retry loop. 12437 */ 12438 drm_crtc_get_hv_timing(&pipe_config->base.mode, 12439 &pipe_config->pipe_src_w, 12440 &pipe_config->pipe_src_h); 12441 12442 encoder_retry: 12443 /* Ensure the port clock defaults are reset when retrying. */ 12444 pipe_config->port_clock = 0; 12445 pipe_config->pixel_multiplier = 1; 12446 12447 /* Fill in default crtc timings, allow encoders to overwrite them. */ 12448 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 12449 CRTC_STEREO_DOUBLE); 12450 12451 /* Pass our mode to the connectors and the CRTC to give them a chance to 12452 * adjust it according to limitations or connector properties, and also 12453 * a chance to reject the mode entirely. 12454 */ 12455 for_each_connector_in_state(state, connector, connector_state, i) { 12456 if (connector_state->crtc != crtc) 12457 continue; 12458 12459 encoder = to_intel_encoder(connector_state->best_encoder); 12460 12461 if (!(encoder->compute_config(encoder, pipe_config))) { 12462 DRM_DEBUG_KMS("Encoder config failure\n"); 12463 goto fail; 12464 } 12465 } 12466 12467 /* Set default port clock if not overwritten by the encoder. Needs to be 12468 * done afterwards in case the encoder adjusts the mode. 
*/ 12469 if (!pipe_config->port_clock) 12470 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 12471 * pipe_config->pixel_multiplier; 12472 12473 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 12474 if (ret < 0) { 12475 DRM_DEBUG_KMS("CRTC fixup failed\n"); 12476 goto fail; 12477 } 12478 12479 if (ret == RETRY) { 12480 if (WARN(!retry, "loop in pipe configuration computation\n")) { 12481 ret = -EINVAL; 12482 goto fail; 12483 } 12484 12485 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 12486 retry = false; 12487 goto encoder_retry; 12488 } 12489 12490 /* Dithering seems to not pass-through bits correctly when it should, so 12491 * only enable it on 6bpc panels. */ 12492 pipe_config->dither = pipe_config->pipe_bpp == 6*3; 12493 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 12494 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 12495 12496 fail: 12497 return ret; 12498 } 12499 12500 static void 12501 intel_modeset_update_crtc_state(struct drm_atomic_state *state) 12502 { 12503 struct drm_crtc *crtc; 12504 struct drm_crtc_state *crtc_state; 12505 int i; 12506 12507 /* Double check state. */ 12508 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12509 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state); 12510 12511 /* Update hwmode for vblank functions */ 12512 if (crtc->state->active) 12513 crtc->hwmode = crtc->state->adjusted_mode; 12514 else 12515 crtc->hwmode.crtc_clock = 0; 12516 12517 /* 12518 * Update legacy state to satisfy fbc code. This can 12519 * be removed when fbc uses the atomic state. 
12520 */ 12521 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 12522 struct drm_plane_state *plane_state = crtc->primary->state; 12523 12524 crtc->primary->fb = plane_state->fb; 12525 crtc->x = plane_state->src_x >> 16; 12526 crtc->y = plane_state->src_y >> 16; 12527 } 12528 } 12529 } 12530 12531 static bool intel_fuzzy_clock_check(int clock1, int clock2) 12532 { 12533 int diff; 12534 12535 if (clock1 == clock2) 12536 return true; 12537 12538 if (!clock1 || !clock2) 12539 return false; 12540 12541 diff = abs(clock1 - clock2); 12542 12543 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12544 return true; 12545 12546 return false; 12547 } 12548 12549 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 12550 list_for_each_entry((intel_crtc), \ 12551 &(dev)->mode_config.crtc_list, \ 12552 base.head) \ 12553 for_each_if (mask & (1 <<(intel_crtc)->pipe)) 12554 12555 static bool 12556 intel_compare_m_n(unsigned int m, unsigned int n, 12557 unsigned int m2, unsigned int n2, 12558 bool exact) 12559 { 12560 if (m == m2 && n == n2) 12561 return true; 12562 12563 if (exact || !m || !n || !m2 || !n2) 12564 return false; 12565 12566 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12567 12568 if (m > m2) { 12569 while (m > m2) { 12570 m2 <<= 1; 12571 n2 <<= 1; 12572 } 12573 } else if (m < m2) { 12574 while (m < m2) { 12575 m <<= 1; 12576 n <<= 1; 12577 } 12578 } 12579 12580 return m == m2 && n == n2; 12581 } 12582 12583 static bool 12584 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12585 struct intel_link_m_n *m2_n2, 12586 bool adjust) 12587 { 12588 if (m_n->tu == m2_n2->tu && 12589 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12590 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 12591 intel_compare_m_n(m_n->link_m, m_n->link_n, 12592 m2_n2->link_m, m2_n2->link_n, !adjust)) { 12593 if (adjust) 12594 *m2_n2 = *m_n; 12595 12596 return true; 12597 } 12598 12599 return false; 12600 } 12601 12602 static bool 12603 
intel_pipe_config_compare(struct drm_device *dev, 12604 struct intel_crtc_state *current_config, 12605 struct intel_crtc_state *pipe_config, 12606 bool adjust) 12607 { 12608 bool ret = true; 12609 12610 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ 12611 do { \ 12612 if (!adjust) \ 12613 DRM_ERROR(fmt, ##__VA_ARGS__); \ 12614 else \ 12615 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ 12616 } while (0) 12617 12618 #define PIPE_CONF_CHECK_X(name) \ 12619 if (current_config->name != pipe_config->name) { \ 12620 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12621 "(expected 0x%08x, found 0x%08x)\n", \ 12622 current_config->name, \ 12623 pipe_config->name); \ 12624 ret = false; \ 12625 } 12626 12627 #define PIPE_CONF_CHECK_I(name) \ 12628 if (current_config->name != pipe_config->name) { \ 12629 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12630 "(expected %i, found %i)\n", \ 12631 current_config->name, \ 12632 pipe_config->name); \ 12633 ret = false; \ 12634 } 12635 12636 #define PIPE_CONF_CHECK_M_N(name) \ 12637 if (!intel_compare_link_m_n(¤t_config->name, \ 12638 &pipe_config->name,\ 12639 adjust)) { \ 12640 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12641 "(expected tu %i gmch %i/%i link %i/%i, " \ 12642 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12643 current_config->name.tu, \ 12644 current_config->name.gmch_m, \ 12645 current_config->name.gmch_n, \ 12646 current_config->name.link_m, \ 12647 current_config->name.link_n, \ 12648 pipe_config->name.tu, \ 12649 pipe_config->name.gmch_m, \ 12650 pipe_config->name.gmch_n, \ 12651 pipe_config->name.link_m, \ 12652 pipe_config->name.link_n); \ 12653 ret = false; \ 12654 } 12655 12656 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 12657 if (!intel_compare_link_m_n(¤t_config->name, \ 12658 &pipe_config->name, adjust) && \ 12659 !intel_compare_link_m_n(¤t_config->alt_name, \ 12660 &pipe_config->name, adjust)) { \ 12661 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12662 "(expected tu %i gmch %i/%i link %i/%i, " \ 12663 "or 
tu %i gmch %i/%i link %i/%i, " \ 12664 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12665 current_config->name.tu, \ 12666 current_config->name.gmch_m, \ 12667 current_config->name.gmch_n, \ 12668 current_config->name.link_m, \ 12669 current_config->name.link_n, \ 12670 current_config->alt_name.tu, \ 12671 current_config->alt_name.gmch_m, \ 12672 current_config->alt_name.gmch_n, \ 12673 current_config->alt_name.link_m, \ 12674 current_config->alt_name.link_n, \ 12675 pipe_config->name.tu, \ 12676 pipe_config->name.gmch_m, \ 12677 pipe_config->name.gmch_n, \ 12678 pipe_config->name.link_m, \ 12679 pipe_config->name.link_n); \ 12680 ret = false; \ 12681 } 12682 12683 /* This is required for BDW+ where there is only one set of registers for 12684 * switching between high and low RR. 12685 * This macro can be used whenever a comparison has to be made between one 12686 * hw state and multiple sw state variables. 12687 */ 12688 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ 12689 if ((current_config->name != pipe_config->name) && \ 12690 (current_config->alt_name != pipe_config->name)) { \ 12691 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12692 "(expected %i or %i, found %i)\n", \ 12693 current_config->name, \ 12694 current_config->alt_name, \ 12695 pipe_config->name); \ 12696 ret = false; \ 12697 } 12698 12699 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 12700 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12701 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ 12702 "(expected %i, found %i)\n", \ 12703 current_config->name & (mask), \ 12704 pipe_config->name & (mask)); \ 12705 ret = false; \ 12706 } 12707 12708 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 12709 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12710 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12711 "(expected %i, found %i)\n", \ 12712 current_config->name, \ 12713 pipe_config->name); \ 12714 ret = false; \ 12715 } 12716 12717 #define 
PIPE_CONF_QUIRK(quirk) \ 12718 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12719 12720 PIPE_CONF_CHECK_I(cpu_transcoder); 12721 12722 PIPE_CONF_CHECK_I(has_pch_encoder); 12723 PIPE_CONF_CHECK_I(fdi_lanes); 12724 PIPE_CONF_CHECK_M_N(fdi_m_n); 12725 12726 PIPE_CONF_CHECK_I(has_dp_encoder); 12727 PIPE_CONF_CHECK_I(lane_count); 12728 12729 if (INTEL_INFO(dev)->gen < 8) { 12730 PIPE_CONF_CHECK_M_N(dp_m_n); 12731 12732 if (current_config->has_drrs) 12733 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12734 } else 12735 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12736 12737 PIPE_CONF_CHECK_I(has_dsi_encoder); 12738 12739 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12740 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12741 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12742 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12743 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12744 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12745 12746 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12747 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12748 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12749 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12750 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12751 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12752 12753 PIPE_CONF_CHECK_I(pixel_multiplier); 12754 PIPE_CONF_CHECK_I(has_hdmi_sink); 12755 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 12756 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 12757 PIPE_CONF_CHECK_I(limited_color_range); 12758 PIPE_CONF_CHECK_I(has_infoframe); 12759 12760 PIPE_CONF_CHECK_I(has_audio); 12761 12762 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12763 DRM_MODE_FLAG_INTERLACE); 12764 12765 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 12766 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12767 DRM_MODE_FLAG_PHSYNC); 12768 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12769 DRM_MODE_FLAG_NHSYNC); 
12770 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12771 DRM_MODE_FLAG_PVSYNC); 12772 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12773 DRM_MODE_FLAG_NVSYNC); 12774 } 12775 12776 PIPE_CONF_CHECK_X(gmch_pfit.control); 12777 /* pfit ratios are autocomputed by the hw on gen4+ */ 12778 if (INTEL_INFO(dev)->gen < 4) 12779 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 12780 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 12781 12782 if (!adjust) { 12783 PIPE_CONF_CHECK_I(pipe_src_w); 12784 PIPE_CONF_CHECK_I(pipe_src_h); 12785 12786 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12787 if (current_config->pch_pfit.enabled) { 12788 PIPE_CONF_CHECK_X(pch_pfit.pos); 12789 PIPE_CONF_CHECK_X(pch_pfit.size); 12790 } 12791 12792 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12793 } 12794 12795 /* BDW+ don't expose a synchronous way to read the state */ 12796 if (IS_HASWELL(dev)) 12797 PIPE_CONF_CHECK_I(ips_enabled); 12798 12799 PIPE_CONF_CHECK_I(double_wide); 12800 12801 PIPE_CONF_CHECK_X(ddi_pll_sel); 12802 12803 PIPE_CONF_CHECK_I(shared_dpll); 12804 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12805 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12806 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12807 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12808 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12809 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12810 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12811 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12812 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12813 12814 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 12815 PIPE_CONF_CHECK_I(pipe_bpp); 12816 12817 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12818 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12819 12820 #undef PIPE_CONF_CHECK_X 12821 #undef PIPE_CONF_CHECK_I 12822 #undef PIPE_CONF_CHECK_I_ALT 12823 #undef PIPE_CONF_CHECK_FLAGS 12824 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12825 #undef PIPE_CONF_QUIRK 12826 #undef INTEL_ERR_OR_DBG_KMS 12827 12828 return ret; 12829 } 12830 12831 static void check_wm_state(struct drm_device *dev) 
12832 { 12833 struct drm_i915_private *dev_priv = dev->dev_private; 12834 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12835 struct intel_crtc *intel_crtc; 12836 int plane; 12837 12838 if (INTEL_INFO(dev)->gen < 9) 12839 return; 12840 12841 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 12842 sw_ddb = &dev_priv->wm.skl_hw.ddb; 12843 12844 for_each_intel_crtc(dev, intel_crtc) { 12845 struct skl_ddb_entry *hw_entry, *sw_entry; 12846 const enum i915_pipe pipe = intel_crtc->pipe; 12847 12848 if (!intel_crtc->active) 12849 continue; 12850 12851 /* planes */ 12852 for_each_plane(dev_priv, pipe, plane) { 12853 hw_entry = &hw_ddb.plane[pipe][plane]; 12854 sw_entry = &sw_ddb->plane[pipe][plane]; 12855 12856 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12857 continue; 12858 12859 DRM_ERROR("mismatch in DDB state pipe %c plane %d " 12860 "(expected (%u,%u), found (%u,%u))\n", 12861 pipe_name(pipe), plane + 1, 12862 sw_entry->start, sw_entry->end, 12863 hw_entry->start, hw_entry->end); 12864 } 12865 12866 /* cursor */ 12867 hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; 12868 sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; 12869 12870 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12871 continue; 12872 12873 DRM_ERROR("mismatch in DDB state pipe %c cursor " 12874 "(expected (%u,%u), found (%u,%u))\n", 12875 pipe_name(pipe), 12876 sw_entry->start, sw_entry->end, 12877 hw_entry->start, hw_entry->end); 12878 } 12879 } 12880 12881 static void 12882 check_connector_state(struct drm_device *dev, 12883 struct drm_atomic_state *old_state) 12884 { 12885 struct drm_connector_state *old_conn_state; 12886 struct drm_connector *connector; 12887 int i; 12888 12889 for_each_connector_in_state(old_state, connector, old_conn_state, i) { 12890 struct drm_encoder *encoder = connector->encoder; 12891 struct drm_connector_state *state = connector->state; 12892 12893 /* This also checks the encoder/connector hw state with the 12894 * ->get_hw_state callbacks. 
*/ 12895 intel_connector_check_state(to_intel_connector(connector)); 12896 12897 I915_STATE_WARN(state->best_encoder != encoder, 12898 "connector's atomic encoder doesn't match legacy encoder\n"); 12899 } 12900 } 12901 12902 static void 12903 check_encoder_state(struct drm_device *dev) 12904 { 12905 struct intel_encoder *encoder; 12906 struct intel_connector *connector; 12907 12908 for_each_intel_encoder(dev, encoder) { 12909 bool enabled = false; 12910 enum i915_pipe pipe; 12911 12912 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 12913 encoder->base.base.id, 12914 encoder->base.name); 12915 12916 for_each_intel_connector(dev, connector) { 12917 if (connector->base.state->best_encoder != &encoder->base) 12918 continue; 12919 enabled = true; 12920 12921 I915_STATE_WARN(connector->base.state->crtc != 12922 encoder->base.crtc, 12923 "connector's crtc doesn't match encoder crtc\n"); 12924 } 12925 12926 I915_STATE_WARN(!!encoder->base.crtc != enabled, 12927 "encoder's enabled state mismatch " 12928 "(expected %i, found %i)\n", 12929 !!encoder->base.crtc, enabled); 12930 12931 if (!encoder->base.crtc) { 12932 bool active; 12933 12934 active = encoder->get_hw_state(encoder, &pipe); 12935 I915_STATE_WARN(active, 12936 "encoder detached but still enabled on pipe %c.\n", 12937 pipe_name(pipe)); 12938 } 12939 } 12940 } 12941 12942 static void 12943 check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state) 12944 { 12945 struct drm_i915_private *dev_priv = dev->dev_private; 12946 struct intel_encoder *encoder; 12947 struct drm_crtc_state *old_crtc_state; 12948 struct drm_crtc *crtc; 12949 int i; 12950 12951 for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { 12952 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12953 struct intel_crtc_state *pipe_config, *sw_config; 12954 bool active; 12955 12956 if (!needs_modeset(crtc->state) && 12957 !to_intel_crtc_state(crtc->state)->update_pipe) 12958 continue; 12959 12960 __drm_atomic_helper_crtc_destroy_state(crtc, 
old_crtc_state); 12961 pipe_config = to_intel_crtc_state(old_crtc_state); 12962 memset(pipe_config, 0, sizeof(*pipe_config)); 12963 pipe_config->base.crtc = crtc; 12964 pipe_config->base.state = old_state; 12965 12966 DRM_DEBUG_KMS("[CRTC:%d]\n", 12967 crtc->base.id); 12968 12969 active = dev_priv->display.get_pipe_config(intel_crtc, 12970 pipe_config); 12971 12972 /* hw state is inconsistent with the pipe quirk */ 12973 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 12974 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 12975 active = crtc->state->active; 12976 12977 I915_STATE_WARN(crtc->state->active != active, 12978 "crtc active state doesn't match with hw state " 12979 "(expected %i, found %i)\n", crtc->state->active, active); 12980 12981 I915_STATE_WARN(intel_crtc->active != crtc->state->active, 12982 "transitional active state does not match atomic hw state " 12983 "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active); 12984 12985 for_each_encoder_on_crtc(dev, crtc, encoder) { 12986 enum i915_pipe pipe; 12987 12988 active = encoder->get_hw_state(encoder, &pipe); 12989 I915_STATE_WARN(active != crtc->state->active, 12990 "[ENCODER:%i] active %i with crtc active %i\n", 12991 encoder->base.base.id, active, crtc->state->active); 12992 12993 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 12994 "Encoder connected to wrong pipe %c\n", 12995 pipe_name(pipe)); 12996 12997 if (active) 12998 encoder->get_config(encoder, pipe_config); 12999 } 13000 13001 if (!crtc->state->active) 13002 continue; 13003 13004 sw_config = to_intel_crtc_state(crtc->state); 13005 if (!intel_pipe_config_compare(dev, sw_config, 13006 pipe_config, false)) { 13007 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 13008 intel_dump_pipe_config(intel_crtc, pipe_config, 13009 "[hw state]"); 13010 intel_dump_pipe_config(intel_crtc, sw_config, 13011 "[sw state]"); 13012 } 13013 } 13014 } 13015 13016 static void 13017 
check_shared_dpll_state(struct drm_device *dev) 13018 { 13019 struct drm_i915_private *dev_priv = dev->dev_private; 13020 struct intel_crtc *crtc; 13021 struct intel_dpll_hw_state dpll_hw_state; 13022 int i; 13023 13024 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 13025 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 13026 int enabled_crtcs = 0, active_crtcs = 0; 13027 bool active; 13028 13029 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 13030 13031 DRM_DEBUG_KMS("%s\n", pll->name); 13032 13033 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); 13034 13035 I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask), 13036 "more active pll users than references: %i vs %i\n", 13037 pll->active, hweight32(pll->config.crtc_mask)); 13038 I915_STATE_WARN(pll->active && !pll->on, 13039 "pll in active use but not on in sw tracking\n"); 13040 I915_STATE_WARN(pll->on && !pll->active, 13041 "pll in on but not on in use in sw tracking\n"); 13042 I915_STATE_WARN(pll->on != active, 13043 "pll on state mismatch (expected %i, found %i)\n", 13044 pll->on, active); 13045 13046 for_each_intel_crtc(dev, crtc) { 13047 if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll) 13048 enabled_crtcs++; 13049 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 13050 active_crtcs++; 13051 } 13052 I915_STATE_WARN(pll->active != active_crtcs, 13053 "pll active crtcs mismatch (expected %i, found %i)\n", 13054 pll->active, active_crtcs); 13055 I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs, 13056 "pll enabled crtcs mismatch (expected %i, found %i)\n", 13057 hweight32(pll->config.crtc_mask), enabled_crtcs); 13058 13059 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state, 13060 sizeof(dpll_hw_state)), 13061 "pll hw state mismatch\n"); 13062 } 13063 } 13064 13065 static void 13066 intel_modeset_check_state(struct drm_device *dev, 13067 struct drm_atomic_state *old_state) 13068 { 13069 check_wm_state(dev); 
13070 check_connector_state(dev, old_state); 13071 check_encoder_state(dev); 13072 check_crtc_state(dev, old_state); 13073 check_shared_dpll_state(dev); 13074 } 13075 13076 void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config, 13077 int dotclock) 13078 { 13079 /* 13080 * FDI already provided one idea for the dotclock. 13081 * Yell if the encoder disagrees. 13082 */ 13083 WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock), 13084 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 13085 pipe_config->base.adjusted_mode.crtc_clock, dotclock); 13086 } 13087 13088 static void update_scanline_offset(struct intel_crtc *crtc) 13089 { 13090 struct drm_device *dev = crtc->base.dev; 13091 13092 /* 13093 * The scanline counter increments at the leading edge of hsync. 13094 * 13095 * On most platforms it starts counting from vtotal-1 on the 13096 * first active line. That means the scanline counter value is 13097 * always one less than what we would expect. Ie. just after 13098 * start of vblank, which also occurs at start of hsync (on the 13099 * last active line), the scanline counter will read vblank_start-1. 13100 * 13101 * On gen2 the scanline counter starts counting from 1 instead 13102 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 13103 * to keep the value positive), instead of adding one. 13104 * 13105 * On HSW+ the behaviour of the scanline counter depends on the output 13106 * type. For DP ports it behaves like most other platforms, but on HDMI 13107 * there's an extra 1 line difference. So we need to add two instead of 13108 * one to the value. 
13109 */ 13110 if (IS_GEN2(dev)) { 13111 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 13112 int vtotal; 13113 13114 vtotal = adjusted_mode->crtc_vtotal; 13115 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 13116 vtotal /= 2; 13117 13118 crtc->scanline_offset = vtotal - 1; 13119 } else if (HAS_DDI(dev) && 13120 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 13121 crtc->scanline_offset = 2; 13122 } else 13123 crtc->scanline_offset = 1; 13124 } 13125 13126 static void intel_modeset_clear_plls(struct drm_atomic_state *state) 13127 { 13128 struct drm_device *dev = state->dev; 13129 struct drm_i915_private *dev_priv = to_i915(dev); 13130 struct intel_shared_dpll_config *shared_dpll = NULL; 13131 struct intel_crtc *intel_crtc; 13132 struct intel_crtc_state *intel_crtc_state; 13133 struct drm_crtc *crtc; 13134 struct drm_crtc_state *crtc_state; 13135 int i; 13136 13137 if (!dev_priv->display.crtc_compute_clock) 13138 return; 13139 13140 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13141 int dpll; 13142 13143 intel_crtc = to_intel_crtc(crtc); 13144 intel_crtc_state = to_intel_crtc_state(crtc_state); 13145 dpll = intel_crtc_state->shared_dpll; 13146 13147 if (!needs_modeset(crtc_state) || dpll == DPLL_ID_PRIVATE) 13148 continue; 13149 13150 intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; 13151 13152 if (!shared_dpll) 13153 shared_dpll = intel_atomic_get_shared_dpll_state(state); 13154 13155 shared_dpll[dpll].crtc_mask &= ~(1 << intel_crtc->pipe); 13156 } 13157 } 13158 13159 /* 13160 * This implements the workaround described in the "notes" section of the mode 13161 * set sequence documentation. When going from no pipes or single pipe to 13162 * multiple pipes, and planes are enabled after the pipe, we need to wait at 13163 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
13164 */ 13165 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) 13166 { 13167 struct drm_crtc_state *crtc_state; 13168 struct intel_crtc *intel_crtc; 13169 struct drm_crtc *crtc; 13170 struct intel_crtc_state *first_crtc_state = NULL; 13171 struct intel_crtc_state *other_crtc_state = NULL; 13172 enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 13173 int i; 13174 13175 /* look at all crtc's that are going to be enabled in during modeset */ 13176 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13177 intel_crtc = to_intel_crtc(crtc); 13178 13179 if (!crtc_state->active || !needs_modeset(crtc_state)) 13180 continue; 13181 13182 if (first_crtc_state) { 13183 other_crtc_state = to_intel_crtc_state(crtc_state); 13184 break; 13185 } else { 13186 first_crtc_state = to_intel_crtc_state(crtc_state); 13187 first_pipe = intel_crtc->pipe; 13188 } 13189 } 13190 13191 /* No workaround needed? */ 13192 if (!first_crtc_state) 13193 return 0; 13194 13195 /* w/a possibly needed, check how many crtc's are already enabled. 
*/ 13196 for_each_intel_crtc(state->dev, intel_crtc) { 13197 struct intel_crtc_state *pipe_config; 13198 13199 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); 13200 if (IS_ERR(pipe_config)) 13201 return PTR_ERR(pipe_config); 13202 13203 pipe_config->hsw_workaround_pipe = INVALID_PIPE; 13204 13205 if (!pipe_config->base.active || 13206 needs_modeset(&pipe_config->base)) 13207 continue; 13208 13209 /* 2 or more enabled crtcs means no need for w/a */ 13210 if (enabled_pipe != INVALID_PIPE) 13211 return 0; 13212 13213 enabled_pipe = intel_crtc->pipe; 13214 } 13215 13216 if (enabled_pipe != INVALID_PIPE) 13217 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 13218 else if (other_crtc_state) 13219 other_crtc_state->hsw_workaround_pipe = first_pipe; 13220 13221 return 0; 13222 } 13223 13224 static int intel_modeset_all_pipes(struct drm_atomic_state *state) 13225 { 13226 struct drm_crtc *crtc; 13227 struct drm_crtc_state *crtc_state; 13228 int ret = 0; 13229 13230 /* add all active pipes to the state */ 13231 for_each_crtc(state->dev, crtc) { 13232 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13233 if (IS_ERR(crtc_state)) 13234 return PTR_ERR(crtc_state); 13235 13236 if (!crtc_state->active || needs_modeset(crtc_state)) 13237 continue; 13238 13239 crtc_state->mode_changed = true; 13240 13241 ret = drm_atomic_add_affected_connectors(state, crtc); 13242 if (ret) 13243 break; 13244 13245 ret = drm_atomic_add_affected_planes(state, crtc); 13246 if (ret) 13247 break; 13248 } 13249 13250 return ret; 13251 } 13252 13253 static int intel_modeset_checks(struct drm_atomic_state *state) 13254 { 13255 struct drm_device *dev = state->dev; 13256 struct drm_i915_private *dev_priv = dev->dev_private; 13257 int ret; 13258 13259 if (!check_digital_port_conflicts(state)) { 13260 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 13261 return -EINVAL; 13262 } 13263 13264 /* 13265 * See if the config requires any additional preparation, e.g. 
13266 * to adjust global state with pipes off. We need to do this 13267 * here so we can get the modeset_pipe updated config for the new 13268 * mode set on this crtc. For other crtcs we need to use the 13269 * adjusted_mode bits in the crtc directly. 13270 */ 13271 if (dev_priv->display.modeset_calc_cdclk) { 13272 unsigned int cdclk; 13273 13274 ret = dev_priv->display.modeset_calc_cdclk(state); 13275 13276 cdclk = to_intel_atomic_state(state)->cdclk; 13277 if (!ret && cdclk != dev_priv->cdclk_freq) 13278 ret = intel_modeset_all_pipes(state); 13279 13280 if (ret < 0) 13281 return ret; 13282 } else 13283 to_intel_atomic_state(state)->cdclk = dev_priv->cdclk_freq; 13284 13285 intel_modeset_clear_plls(state); 13286 13287 if (IS_HASWELL(dev)) 13288 return haswell_mode_set_planes_workaround(state); 13289 13290 return 0; 13291 } 13292 13293 /* 13294 * Handle calculation of various watermark data at the end of the atomic check 13295 * phase. The code here should be run after the per-crtc and per-plane 'check' 13296 * handlers to ensure that all derived state has been updated. 13297 */ 13298 static void calc_watermark_data(struct drm_atomic_state *state) 13299 { 13300 struct drm_device *dev = state->dev; 13301 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13302 struct drm_crtc *crtc; 13303 struct drm_crtc_state *cstate; 13304 struct drm_plane *plane; 13305 struct drm_plane_state *pstate; 13306 13307 /* 13308 * Calculate watermark configuration details now that derived 13309 * plane/crtc state is all properly updated. 
	 */
	drm_for_each_crtc(crtc, dev) {
		/*
		 * Use the state being checked when this crtc is part of it,
		 * otherwise fall back to the currently-committed state.
		 */
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
			crtc->state;

		if (cstate->active)
			intel_state->wm_config.num_pipes_active++;
	}
	drm_for_each_legacy_plane(plane, dev) {
		/* Same existing-state-or-committed-state fallback as above. */
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
			plane->state;

		if (!to_intel_plane_state(pstate)->visible)
			continue;

		intel_state->wm_config.sprites_enabled = true;
		/* src_w/src_h are 16.16 fixed point; any mismatch with the
		 * crtc size means the sprite is being scaled. */
		if (pstate->crtc_w != pstate->src_w >> 16 ||
		    pstate->crtc_h != pstate->src_h >> 16)
			intel_state->wm_config.sprites_scaled = true;
	}
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		memset(&to_intel_crtc(crtc)->atomic, 0,
		       sizeof(struct intel_crtc_atomic_commit));

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!crtc_state->enable) {
			if (needs_modeset(crtc_state))
				any_ms = true;
			continue;
		}

		if (!needs_modeset(crtc_state))
			continue;

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all.
		 */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret)
			return ret;

		/*
		 * With fastboot, a computed config identical to the current
		 * one is downgraded from a full modeset to a fastset
		 * (update_pipe) to avoid blanking the screen.
		 */
		if (i915.fastboot &&
		    intel_pipe_config_compare(state->dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state)) {
			any_ms = true;

			ret = drm_atomic_add_affected_planes(state, crtc);
			if (ret)
				return ret;
		}

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		intel_state->cdclk = to_i915(state->dev)->cdclk_freq;

	ret = drm_atomic_helper_check_planes(state->dev, state);
	if (ret)
		return ret;

	calc_watermark_data(state);

	return 0;
}

/*
 * Pre-commit preparation: wait for pending page flips, pin the new
 * framebuffers (drm_atomic_helper_prepare_planes), and wait for any
 * outstanding rendering on them with struct_mutex dropped.  -EIO from the
 * render wait is swallowed so display updates keep working across a GPU
 * hang.  Async commit is not supported and is rejected up front.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle: drain the unpin work queue if it is backing up. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/* Sample the reset counter before dropping the lock so the
		 * waits below notice a reset that happens in between. */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		if (!ret)
			return 0;

		/* A wait failed: unpin everything we prepared above. */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME:  Atomic modeset support for i915 is not yet complete.  At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret = 0;
	int i;
	bool any_ms = false;

	ret = intel_atomic_prepare_commit(dev, state, async);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* Point of no return: from here on crtc_state holds the OLD state. */
	drm_atomic_helper_swap_state(dev, state);
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;

	/* Phase 1: disable every pipe that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (!needs_modeset(crtc->state))
			continue;

		any_ms = true;
		intel_pre_plane_update(intel_crtc);

		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (any_ms) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
		modeset_update_crtc_power_domains(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up.
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		bool update_pipe = !modeset &&
			to_intel_crtc_state(crtc->state)->update_pipe;
		unsigned long put_domains = 0;

		if (modeset)
			intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		if (update_pipe) {
			put_domains = modeset_get_crtc_power_domains(crtc);

			/* make sure intel_modeset_check_state runs */
			any_ms = true;
		}

		if (!modeset)
			intel_pre_plane_update(intel_crtc);

		if (crtc->state->active &&
		    (crtc->state->planes_changed || update_pipe))
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (put_domains)
			modeset_put_power_domains(dev_priv, put_domains);

		intel_post_plane_update(intel_crtc);

		if (modeset)
			intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* FIXME: add subpixel order */

	drm_atomic_helper_wait_for_vblanks(dev, state);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	if (any_ms)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	return 0;
}

/*
 * Force a re-commit of the crtc's current mode, e.g. to reapply the mode
 * after something else clobbered the hardware state.  Best-effort: failures
 * other than -EDEADLK (which is retried via the modeset backoff dance) are
 * silently dropped.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	state->acquire_ctx =
drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * NOTE: the "out" label deliberately sits inside the if-body so that
	 * both the inactive-crtc early exit above and any commit/lookup
	 * failure free the state.  On success the state is consumed by
	 * drm_atomic_commit() and must not be freed here.
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}

#undef for_each_intel_crtc_masked

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};

/*
 * Read back the PCH DPLL registers into @hw_state.  Returns true iff the
 * PLL is enabled in hardware (DPLL_VCO_ENABLE set), false also when the
 * PLL power domain could not be acquired.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & DPLL_VCO_ENABLE;
}

/* Program the divisor (FP0/FP1) registers from the cached hw_state. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}

/* Enable the PCH DPLL and wait for its clocks to stabilize. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us. */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

/* Register the two IBX/CPT PCH shared DPLLs and their vfuncs. */
static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

/* Pick the platform-appropriate shared DPLL setup (DDI, PCH, or none). */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @fb: framebuffer to prepare for presentation
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret = 0;

	/* Nothing to pin and nothing to unpin later: done. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);

		/* Swallow -EIO errors to allow updates during hw lockup. */
		if (ret && ret != -EIO)
			return ret;
	}

	/* For framebuffer backed by dmabuf, wait for fence */
	/* NOTE(review): this dmabuf fence wait is compiled out (#if 0) —
	 * presumably pending reservation-object support; confirm before
	 * enabling. */
#if 0
	if (obj && obj->base.dma_buf) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							   false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}
#endif

	if (!obj) {
		ret = 0;
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* Older platforms scan cursors out of a physical address. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
	}

	if (ret == 0) {
		if (obj) {
			struct intel_plane_state *plane_state =
				to_intel_plane_state(new_state);

			/* Remember the request to wait on before flipping. */
			i915_gem_request_assign(&plane_state->wait_req,
						obj->last_write_req);
		}

		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
	}

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @fb: old framebuffer that was on plane
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/* Physical-address cursors were never GGTT-pinned, so skip unpin. */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state);

	/* prepare_fb aborted? */
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	i915_gem_request_assign(&old_intel_state->wait_req, NULL);

}

/*
 * Compute the maximum upscale factor (16.16 fixed point) the SKL pipe
 * scaler supports for this crtc's clock.  Returns
 * DRM_PLANE_HELPER_NO_SCALING when scaling is impossible or the inputs are
 * not available.
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 * (both candidates are expressed in 16.16 fixed point)
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}

static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	/* Only gen9+ primary planes can be scaled or freely positioned. */
	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}

/*
 * Write the checked primary plane state to hardware via the per-platform
 * update_primary_plane hook.  src coordinates are 16.16 fixed point, hence
 * the >> 16.
 */
static void
intel_commit_primary_plane(struct drm_plane *plane,
			   struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	crtc = crtc ? crtc : plane->crtc;

	dev_priv->display.update_primary_plane(crtc, fb,
					       state->src.x1 >> 16,
					       state->src.y1 >> 16);
}

/* Turn the primary plane off by committing a NULL framebuffer. */
static void
intel_disable_primary_plane(struct drm_plane *plane,
			    struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
}

static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* A full modeset reprograms everything anyway; nothing more to do. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}

/* Counterpart of intel_begin_crtc_commit: end the vblank evasion window. */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate and register the primary plane for @pipe, selecting the
 * per-generation format list and hooking up the check/commit/disable
 * vfuncs.  Returns NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	primary->disable_plane = intel_disable_primary_plane;
	/* Pipe/plane swap for FBC on gen2/3 — see intel_crtc_init(). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY, NULL);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}

/*
 * Lazily create the device-wide rotation property (0/180, plus 90/270 on
 * gen9+) and attach it to @plane with the plane's current rotation as the
 * initial value.
 */
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
	if (!dev->mode_config.rotation_property) {
		unsigned long flags = BIT(DRM_ROTATE_0) |
				      BIT(DRM_ROTATE_180);

		if (INTEL_INFO(dev)->gen >= 9)
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);

		dev->mode_config.rotation_property =
			drm_mode_create_rotation_property(dev, flags);
	}
	if (dev->mode_config.rotation_property)
		drm_object_attach_property(&plane->base.base,
				dev->mode_config.rotation_property,
				plane->base.state->rotation);
}

static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor
 ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Hardware cursor stride: width rounded up to a power of two, 4 bpp. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}

/* Hide the cursor on @crtc. */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	intel_crtc_update_cursor(crtc, false);
}

/*
 * Latch the cursor surface address (GGTT offset, or physical bus address on
 * platforms that need it) into the crtc and update the hardware cursor if
 * the crtc is active.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;

	if (crtc->state->active)
		intel_crtc_update_cursor(crtc, state->visible);
}

/*
 * Allocate and register the cursor plane for @pipe (ARGB8888 only, no
 * scaling), attaching the rotation property on gen4+.  Returns NULL on
 * allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR, NULL);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}

/* Mark every shared pipe scaler unused and reset the crtc's scaler id. */
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state)
{
	int i;
	struct intel_scaler *intel_scaler;
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;

	for (i = 0; i < intel_crtc->num_scalers; i++) {
		intel_scaler = &scaler_state->scalers[i];
		intel_scaler->in_use = 0;
		intel_scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}

/*
 * Allocate and register one crtc for @pipe together with its primary and
 * cursor planes, gamma LUT, scalers (gen9+) and the pipe/plane lookup
 * tables.  Failures are silent: on any allocation/registration error the
 * partially constructed objects are torn down and the crtc is simply not
 * created.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has only one scaler; A and B have SKL_NUM_SCALERS. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs, NULL);
	if (ret)
		goto fail;

	/* Identity gamma ramp as the default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 = "unknown", forcing the first cursor update to program hw. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}

/*
 * Return the pipe the connector's encoder is currently driving, or
 * INVALID_PIPE if the connector has no active encoder/crtc.  Requires the
 * connection_mutex to be held.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/* DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: map a crtc id to its pipe. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build the possible_clones bitmask for @encoder: bit N is set when the
 * N-th encoder on the device can share a crtc with it.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* True when eDP on port A is present and not fused off. */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* True when the platform/SKU can have an analog VGA (CRT) output. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if
(intel_crt_present(dev)) 14458 intel_crt_init(dev); 14459 14460 if (IS_BROXTON(dev)) { 14461 /* 14462 * FIXME: Broxton doesn't support port detection via the 14463 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 14464 * detect the ports. 14465 */ 14466 intel_ddi_init(dev, PORT_A); 14467 intel_ddi_init(dev, PORT_B); 14468 intel_ddi_init(dev, PORT_C); 14469 } else if (HAS_DDI(dev)) { 14470 int found; 14471 14472 /* 14473 * Haswell uses DDI functions to detect digital outputs. 14474 * On SKL pre-D0 the strap isn't connected, so we assume 14475 * it's there. 14476 */ 14477 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14478 /* WaIgnoreDDIAStrap: skl */ 14479 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 14480 intel_ddi_init(dev, PORT_A); 14481 14482 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14483 * register */ 14484 found = I915_READ(SFUSE_STRAP); 14485 14486 if (found & SFUSE_STRAP_DDIB_DETECTED) 14487 intel_ddi_init(dev, PORT_B); 14488 if (found & SFUSE_STRAP_DDIC_DETECTED) 14489 intel_ddi_init(dev, PORT_C); 14490 if (found & SFUSE_STRAP_DDID_DETECTED) 14491 intel_ddi_init(dev, PORT_D); 14492 /* 14493 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 
14494 */ 14495 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) && 14496 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14497 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14498 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14499 intel_ddi_init(dev, PORT_E); 14500 14501 } else if (HAS_PCH_SPLIT(dev)) { 14502 int found; 14503 dpd_is_edp = intel_dp_is_edp(dev, PORT_D); 14504 14505 if (has_edp_a(dev)) 14506 intel_dp_init(dev, DP_A, PORT_A); 14507 14508 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14509 /* PCH SDVOB multiplex with HDMIB */ 14510 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B); 14511 if (!found) 14512 intel_hdmi_init(dev, PCH_HDMIB, PORT_B); 14513 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14514 intel_dp_init(dev, PCH_DP_B, PORT_B); 14515 } 14516 14517 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 14518 intel_hdmi_init(dev, PCH_HDMIC, PORT_C); 14519 14520 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 14521 intel_hdmi_init(dev, PCH_HDMID, PORT_D); 14522 14523 if (I915_READ(PCH_DP_C) & DP_DETECTED) 14524 intel_dp_init(dev, PCH_DP_C, PORT_C); 14525 14526 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14527 intel_dp_init(dev, PCH_DP_D, PORT_D); 14528 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 14529 /* 14530 * The DP_DETECTED bit is the latched state of the DDC 14531 * SDA pin at boot. However since eDP doesn't require DDC 14532 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14533 * eDP ports may have been muxed to an alternate function. 14534 * Thus we can't rely on the DP_DETECTED bit alone to detect 14535 * eDP ports. Consult the VBT as well as DP_DETECTED to 14536 * detect eDP ports. 
14537 */ 14538 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14539 !intel_dp_is_edp(dev, PORT_B)) 14540 intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14541 if (I915_READ(VLV_DP_B) & DP_DETECTED || 14542 intel_dp_is_edp(dev, PORT_B)) 14543 intel_dp_init(dev, VLV_DP_B, PORT_B); 14544 14545 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14546 !intel_dp_is_edp(dev, PORT_C)) 14547 intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14548 if (I915_READ(VLV_DP_C) & DP_DETECTED || 14549 intel_dp_is_edp(dev, PORT_C)) 14550 intel_dp_init(dev, VLV_DP_C, PORT_C); 14551 14552 if (IS_CHERRYVIEW(dev)) { 14553 /* eDP not supported on port D, so don't check VBT */ 14554 if (I915_READ(CHV_HDMID) & SDVO_DETECTED) 14555 intel_hdmi_init(dev, CHV_HDMID, PORT_D); 14556 if (I915_READ(CHV_DP_D) & DP_DETECTED) 14557 intel_dp_init(dev, CHV_DP_D, PORT_D); 14558 } 14559 14560 intel_dsi_init(dev); 14561 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) { 14562 bool found = false; 14563 14564 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14565 DRM_DEBUG_KMS("probing SDVOB\n"); 14566 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B); 14567 if (!found && IS_G4X(dev)) { 14568 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14569 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B); 14570 } 14571 14572 if (!found && IS_G4X(dev)) 14573 intel_dp_init(dev, DP_B, PORT_B); 14574 } 14575 14576 /* Before G4X SDVOC doesn't have its own detect register */ 14577 14578 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14579 DRM_DEBUG_KMS("probing SDVOC\n"); 14580 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C); 14581 } 14582 14583 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14584 14585 if (IS_G4X(dev)) { 14586 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14587 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C); 14588 } 14589 if (IS_G4X(dev)) 14590 intel_dp_init(dev, DP_C, PORT_C); 14591 } 14592 14593 if (IS_G4X(dev) && 14594 (I915_READ(DP_D) & DP_DETECTED)) 14595 intel_dp_init(dev, DP_D, PORT_D); 14596 } else if (IS_GEN2(dev)) 14597 intel_dvo_init(dev); 

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute clone/CRTC compatibility. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/*
 * drm_framebuffer_funcs.destroy: tear down an intel framebuffer and drop
 * the GEM object reference (and its framebuffer_references count) under
 * struct_mutex.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Catch an underflowing reference count (post-decrement on purpose). */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/*
 * drm_framebuffer_funcs.create_handle: export a GEM handle for the fb's
 * backing object.  userptr-backed objects are refused since their pages
 * belong to another process's address space.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

/*
 * drm_framebuffer_funcs.dirty: frontbuffer-rendering flush hook.  The clip
 * rectangles are ignored; the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

/*
 * Maximum framebuffer pitch in bytes for the given tiling modifier and
 * pixel format, per display generation.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		/* "The stride in bytes must not exceed the of the size of 8K
		 * pixels and 32K bytes."
		 */
		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}

/*
 * Validate a userspace ADDFB(2) request against hardware limits and bind
 * @obj as the backing store of @intel_fb.  Checks tiling/modifier
 * consistency, stride alignment and limits, pixel format support per
 * generation, and that the object is large enough.  Returns 0 or -EINVAL /
 * the drm_framebuffer_init() error.  Caller holds struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%lx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - on gen9+ Y/Yf are accepted like X/linear */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%lx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		/* NOTE(review): "at less than" reads wrongly; message typo
		 * also present upstream - consider fixing separately. */
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout requires the fb pitch to equal the fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * drm_mode_config_funcs.fb_create: resolve the GEM handle and build an
 * intel framebuffer around it.  On failure the object reference taken by
 * the lookup is dropped.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	/* NOTE(review): relies on 'base' being at offset 0 of the object so
	 * that &obj->base is NULL iff the lookup returned NULL - confirm. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}

#ifndef CONFIG_DRM_FBDEV_EMULATION
/* Stub so intel_mode_funcs can always reference the symbol. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL search routine, per PLL hardware flavour. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* CRTC/plane hooks, per display engine family. */
	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			skylake_update_primary_plane;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev) || IS_G4X(dev))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training and cdclk reprogramming hooks. */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	/* CS-based page flip implementation, per ring command set. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	/* DragonFly lockmgr-based mutex init (Linux uses mutex_init here). */
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* Same as above, for pipe B. */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* PCI-ID-matched quirk entry; PCI_ANY_ID wildcards subsystem fields. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	/* NOTE(review): 0x3577 is matched twice (pipe A and pipe B force);
	 * both hooks fire for every 830 - appears intentional, confirm. */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

/*
 * Apply all PCI-ID and DMI quirks matching this device.  Runs every
 * matching hook, not just the first.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set the SR01 "screen off" bit via legacy VGA I/O ports.
	 * NOTE(review): outb(port, value) argument order is the BSD
	 * convention (reversed from Linux's outb(value, port)) - confirm. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/* Re-initialize clocks/clock gating/powersave after reset or resume. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_update_cdclk(dev);
	intel_prepare_ddi(dev);
	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}

/*
 * Main modeset bring-up: register mode config limits, quirks, per-chip
 * function tables, CRTCs/planes and outputs, then read out the BIOS
 * hardware state.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

dev->mode_config.allow_fb_modifiers = true; 15275 15276 dev->mode_config.funcs = &intel_mode_funcs; 15277 15278 intel_init_quirks(dev); 15279 15280 intel_init_pm(dev); 15281 15282 if (INTEL_INFO(dev)->num_pipes == 0) 15283 return; 15284 15285 /* 15286 * There may be no VBT; and if the BIOS enabled SSC we can 15287 * just keep using it to avoid unnecessary flicker. Whereas if the 15288 * BIOS isn't using it, don't assume it will work even if the VBT 15289 * indicates as much. 15290 */ 15291 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 15292 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 15293 DREF_SSC1_ENABLE); 15294 15295 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 15296 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 15297 bios_lvds_use_ssc ? "en" : "dis", 15298 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 15299 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 15300 } 15301 } 15302 15303 intel_init_display(dev); 15304 intel_init_audio(dev); 15305 15306 if (IS_GEN2(dev)) { 15307 dev->mode_config.max_width = 2048; 15308 dev->mode_config.max_height = 2048; 15309 } else if (IS_GEN3(dev)) { 15310 dev->mode_config.max_width = 4096; 15311 dev->mode_config.max_height = 4096; 15312 } else { 15313 dev->mode_config.max_width = 8192; 15314 dev->mode_config.max_height = 8192; 15315 } 15316 15317 if (IS_845G(dev) || IS_I865G(dev)) { 15318 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; 15319 dev->mode_config.cursor_height = 1023; 15320 } else if (IS_GEN2(dev)) { 15321 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15322 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15323 } else { 15324 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15325 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15326 } 15327 15328 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 15329 15330 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15331 INTEL_INFO(dev)->num_pipes, 15332 INTEL_INFO(dev)->num_pipes > 1 ? 
"s" : ""); 15333 15334 for_each_pipe(dev_priv, pipe) { 15335 intel_crtc_init(dev, pipe); 15336 for_each_sprite(dev_priv, pipe, sprite) { 15337 ret = intel_plane_init(dev, pipe, sprite); 15338 if (ret) 15339 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 15340 pipe_name(pipe), sprite_name(pipe, sprite), ret); 15341 } 15342 } 15343 15344 intel_update_czclk(dev_priv); 15345 intel_update_cdclk(dev); 15346 15347 intel_shared_dpll_init(dev); 15348 15349 /* Just disable it once at startup */ 15350 i915_disable_vga(dev); 15351 intel_setup_outputs(dev); 15352 15353 drm_modeset_lock_all(dev); 15354 intel_modeset_setup_hw_state(dev); 15355 drm_modeset_unlock_all(dev); 15356 15357 for_each_intel_crtc(dev, crtc) { 15358 struct intel_initial_plane_config plane_config = {}; 15359 15360 if (!crtc->active) 15361 continue; 15362 15363 /* 15364 * Note that reserving the BIOS fb up front prevents us 15365 * from stuffing other stolen allocations like the ring 15366 * on top. This prevents some ugliness at boot time, and 15367 * can even allow for smooth boot transitions if the BIOS 15368 * fb is large enough for the active pipe configuration. 15369 */ 15370 dev_priv->display.get_initial_plane_config(crtc, 15371 &plane_config); 15372 15373 /* 15374 * If the fb is shared between multiple heads, we'll 15375 * just get the first one. 15376 */ 15377 intel_find_initial_plane_obj(crtc, &plane_config); 15378 } 15379 } 15380 15381 static void intel_enable_pipe_a(struct drm_device *dev) 15382 { 15383 struct intel_connector *connector; 15384 struct drm_connector *crt = NULL; 15385 struct intel_load_detect_pipe load_detect_temp; 15386 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15387 15388 /* We can't just switch on the pipe A, we need to set things up with a 15389 * proper mode and output configuration. As a gross hack, enable pipe A 15390 * by enabling the load detect pipe once. 
	 */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No analog (CRT) connector registered: nothing we can use. */
	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}

/* Return false when the *other* primary plane is enabled but scanning
 * out on this crtc's pipe, i.e. the BIOS crossed the gen2/3 plane->pipe
 * mapping.  Single-pipe devices are trivially correct. */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	/* Read the control register of the opposite primary plane. */
	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

/* True when at least one encoder is currently attached to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

/* Fix up BIOS-inherited crtc state so it matches what the driver's
 * bookkeeping expects: clear debug delays, reset vblank state, turn off
 * non-primary planes, repair the plane->pipe mapping and reconcile the
 * software active/enabled flags with the hardware. */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

	/* Clear any frame start delays used for debugging left by the BIOS */
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		/* NOTE(review): enum plane value stored in a bool — works
		 * only because gen2/3 have exactly planes A(0)/B(1). */
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ?
	 */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	/* Does any connector claim this encoder? */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder != &encoder->base)
			continue;

		active = true;
		break;
	}

	if (active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup.
	 */
}

/* Disable the VGA plane again if something (BIOS/SMM) re-enabled it.
 * Caller must hold the required power well reference. */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}

/* Read back whether the primary plane is enabled in hardware. */
static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	plane_state->visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}

/* Populate the software state (crtcs, shared DPLLs, encoders and
 * connectors) from what the hardware registers currently say, without
 * changing any hardware state. */
static void
intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		/* Throw away the stale state before refilling from hw. */
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, crtc->base.state);
		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		/* Rebuild the refcount/mask from the crtcs using this pll. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}

/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL left running with no user. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	/* Watermark readout is platform specific. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);
}

/* Restore the full display state after resume: snapshot the current
 * (pre-suspend) atomic state, re-read and sanitize the hardware, then
 * commit the saved state back with mode_changed forced. */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_atomic_state *state = drm_atomic_state_alloc(dev);
	struct intel_connector *conn;
	struct intel_plane *plane;
	struct drm_crtc *crtc;
	int ret;

	/* Allocation failure: silently skip restore (best effort). */
	if (!state)
		return;

	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* preserve complete old state, including dpll */
	intel_atomic_get_shared_dpll_state(state);

	for_each_crtc(dev, crtc) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_crtc_state(state, crtc);

		ret = PTR_ERR_OR_ZERO(crtc_state);
		if (ret)
			goto err;

		/* force a restore */
		crtc_state->mode_changed = true;
	}

	for_each_intel_plane(dev, plane) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(state, &plane->base));
		if (ret)
			goto err;
	}

	for_each_intel_connector(dev, conn) {
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(state, &conn->base));
		if (ret)
			goto err;
	}

	intel_modeset_setup_hw_state(dev);

	i915_redisable_vga(dev);
	ret = drm_atomic_commit(state);
	if (!ret)
		return;

err:
	DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_free(state);
}

/* Late GEM-dependent modeset init: GT powersave, overlay, and pinning
 * of any BIOS framebuffers taken over at boot. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced.  When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: drop the BIOS fb from this crtc
			 * and its plane state entirely. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}

/* Default connector ->unregister hook: tear down backlight then
 * unregister from sysfs/userspace. */
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

/* Driver-unload teardown of everything intel_modeset_init() set up. */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
15951 */ 15952 drm_kms_helper_poll_fini(dev); 15953 15954 intel_unregister_dsm_handler(); 15955 15956 intel_fbc_disable(dev_priv); 15957 15958 /* flush any delayed tasks or pending work */ 15959 flush_scheduled_work(); 15960 15961 /* destroy the backlight and sysfs files before encoders/connectors */ 15962 for_each_intel_connector(dev, connector) 15963 connector->unregister(connector); 15964 15965 drm_mode_config_cleanup(dev); 15966 15967 intel_cleanup_overlay(dev); 15968 15969 mutex_lock(&dev->struct_mutex); 15970 intel_cleanup_gt_powersave(dev); 15971 mutex_unlock(&dev->struct_mutex); 15972 15973 intel_teardown_gmbus(dev); 15974 } 15975 15976 /* 15977 * Return which encoder is currently attached for connector. 15978 */ 15979 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 15980 { 15981 return &intel_attached_encoder(connector)->base; 15982 } 15983 15984 void intel_connector_attach_encoder(struct intel_connector *connector, 15985 struct intel_encoder *encoder) 15986 { 15987 connector->encoder = encoder; 15988 drm_mode_connector_attach_encoder(&connector->base, 15989 &encoder->base); 15990 } 15991 15992 /* 15993 * set vga decode state - true == enable VGA decode 15994 */ 15995 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 15996 { 15997 struct drm_i915_private *dev_priv = dev->dev_private; 15998 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15999 u16 gmch_ctrl; 16000 16001 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 16002 DRM_ERROR("failed to read control word\n"); 16003 return -EIO; 16004 } 16005 16006 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 16007 return 0; 16008 16009 if (state) 16010 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 16011 else 16012 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 16013 16014 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 16015 DRM_ERROR("failed to write control word\n"); 16016 return -EIO; 16017 } 16018 16019 return 0; 16020 } 16021 16022 #if 0 16023 struct intel_display_error_state { 16024 16025 u32 power_well_driver; 16026 16027 int num_transcoders; 16028 16029 struct intel_cursor_error_state { 16030 u32 control; 16031 u32 position; 16032 u32 base; 16033 u32 size; 16034 } cursor[I915_MAX_PIPES]; 16035 16036 struct intel_pipe_error_state { 16037 bool power_domain_on; 16038 u32 source; 16039 u32 stat; 16040 } pipe[I915_MAX_PIPES]; 16041 16042 struct intel_plane_error_state { 16043 u32 control; 16044 u32 stride; 16045 u32 size; 16046 u32 pos; 16047 u32 addr; 16048 u32 surface; 16049 u32 tile_offset; 16050 } plane[I915_MAX_PIPES]; 16051 16052 struct intel_transcoder_error_state { 16053 bool power_domain_on; 16054 enum transcoder cpu_transcoder; 16055 16056 u32 conf; 16057 16058 u32 htotal; 16059 u32 hblank; 16060 u32 hsync; 16061 u32 vtotal; 16062 u32 vblank; 16063 u32 vsync; 16064 } transcoder[4]; 16065 }; 16066 16067 struct intel_display_error_state * 16068 intel_display_capture_error_state(struct drm_device *dev) 16069 { 16070 struct drm_i915_private *dev_priv = dev->dev_private; 16071 struct intel_display_error_state *error; 16072 int transcoders[] = { 16073 TRANSCODER_A, 16074 TRANSCODER_B, 16075 TRANSCODER_C, 16076 TRANSCODER_EDP, 16077 }; 16078 int i; 16079 16080 if (INTEL_INFO(dev)->num_pipes == 0) 16081 return NULL; 16082 16083 error = kzalloc(sizeof(*error), GFP_ATOMIC); 16084 if (error == 
NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Skip pipes whose power wells are off — reading their
		 * registers would be garbage (or worse). */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/* Pretty-print a previously captured display error state into the
 * error-state buffer. NULL error is a no-op. */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, " Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, " SRC: %08x\n", error->pipe[i].source);
		err_printf(m, " STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->plane[i].control);
		err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, " SIZE: %08x\n", error->plane[i].size);
			err_printf(m, " POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, " SURF: %08x\n", error->plane[i].surface);
			err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, " POS: %08x\n", error->cursor[i].position);
		err_printf(m, " BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, " Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif

/* On file close, drop any pending page-flip completion event owned by
 * this file so the event isn't delivered to a dead client. */
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_unpin_work *work;

		spin_lock_irq(&dev->event_lock);

		work = crtc->unpin_work;

		if (work && work->event &&
		    work->event->base.file_priv == file) {
			kfree(work->event);
			work->event = NULL;
		}

		spin_unlock_irq(&dev->event_lock);
	}
}