1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/input.h> 30 #include <linux/i2c.h> 31 #include <linux/kernel.h> 32 #include <linux/slab.h> 33 #include <linux/vgaarb.h> 34 #include <drm/drm_edid.h> 35 #include <drm/drmP.h> 36 #include "intel_drv.h" 37 #include "intel_frontbuffer.h" 38 #include <drm/i915_drm.h> 39 #include "i915_drv.h" 40 #include "i915_gem_clflush.h" 41 #include "intel_dsi.h" 42 #include "i915_trace.h" 43 #include <drm/drm_atomic.h> 44 #include <drm/drm_atomic_helper.h> 45 #include <drm/drm_dp_helper.h> 46 #include <drm/drm_crtc_helper.h> 47 #include <drm/drm_plane_helper.h> 48 #include <drm/drm_rect.h> 49 #include <linux/dma_remapping.h> 50 #include <linux/reservation.h> 51 52 /* Primary plane formats for gen <= 3 */ 53 static const uint32_t i8xx_primary_formats[] = { 54 DRM_FORMAT_C8, 55 DRM_FORMAT_RGB565, 56 DRM_FORMAT_XRGB1555, 57 DRM_FORMAT_XRGB8888, 58 }; 59 60 /* Primary plane formats for gen >= 4 */ 61 static const uint32_t i965_primary_formats[] = { 62 DRM_FORMAT_C8, 63 DRM_FORMAT_RGB565, 64 DRM_FORMAT_XRGB8888, 65 DRM_FORMAT_XBGR8888, 66 DRM_FORMAT_XRGB2101010, 67 DRM_FORMAT_XBGR2101010, 68 }; 69 70 static const uint64_t i9xx_format_modifiers[] = { 71 I915_FORMAT_MOD_X_TILED, 72 DRM_FORMAT_MOD_LINEAR, 73 DRM_FORMAT_MOD_INVALID 74 }; 75 76 static const uint32_t skl_primary_formats[] = { 77 DRM_FORMAT_C8, 78 DRM_FORMAT_RGB565, 79 DRM_FORMAT_XRGB8888, 80 DRM_FORMAT_XBGR8888, 81 DRM_FORMAT_ARGB8888, 82 DRM_FORMAT_ABGR8888, 83 DRM_FORMAT_XRGB2101010, 84 DRM_FORMAT_XBGR2101010, 85 DRM_FORMAT_YUYV, 86 DRM_FORMAT_YVYU, 87 DRM_FORMAT_UYVY, 88 DRM_FORMAT_VYUY, 89 }; 90 91 static const uint64_t skl_format_modifiers_noccs[] = { 92 I915_FORMAT_MOD_Yf_TILED, 93 I915_FORMAT_MOD_Y_TILED, 94 I915_FORMAT_MOD_X_TILED, 95 DRM_FORMAT_MOD_LINEAR, 96 DRM_FORMAT_MOD_INVALID 97 }; 98 99 static const uint64_t skl_format_modifiers_ccs[] = { 100 I915_FORMAT_MOD_Yf_TILED_CCS, 101 I915_FORMAT_MOD_Y_TILED_CCS, 102 I915_FORMAT_MOD_Yf_TILED, 103 I915_FORMAT_MOD_Y_TILED, 104 I915_FORMAT_MOD_X_TILED, 105 DRM_FORMAT_MOD_LINEAR, 106 DRM_FORMAT_MOD_INVALID 107 }; 108 109 /* Cursor formats */ 110 static const uint32_t intel_cursor_formats[] = { 111 DRM_FORMAT_ARGB8888, 112 }; 113 114 static const uint64_t cursor_format_modifiers[] = { 115 DRM_FORMAT_MOD_LINEAR, 116 DRM_FORMAT_MOD_INVALID 117 }; 118 119 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 120 struct intel_crtc_state *pipe_config); 121 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 122 struct intel_crtc_state *pipe_config); 123 124 static int intel_framebuffer_init(struct intel_framebuffer *ifb, 125 struct drm_i915_gem_object *obj, 126 struct drm_mode_fb_cmd2 *mode_cmd); 127 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 128 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 129 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc); 130 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 131 struct intel_link_m_n *m_n, 132 struct intel_link_m_n *m2_n2); 133 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 134 static void haswell_set_pipeconf(struct drm_crtc *crtc); 135 static void haswell_set_pipemisc(struct drm_crtc *crtc); 136 static void vlv_prepare_pll(struct intel_crtc *crtc, 137 const struct intel_crtc_state *pipe_config); 138 static void chv_prepare_pll(struct intel_crtc *crtc, 139 const struct intel_crtc_state *pipe_config); 140 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 141 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 142 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 143 struct intel_crtc_state *crtc_state); 144 static void skylake_pfit_enable(struct intel_crtc *crtc); 145 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 146 static void ironlake_pfit_enable(struct intel_crtc *crtc); 147 static void intel_modeset_setup_hw_state(struct drm_device *dev, 148 struct drm_modeset_acquire_ctx *ctx); 149 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 150 151 struct intel_limit { 152 struct { 153 int min, max; 154 } dot, vco, n, m, m1, m2, p, p1; 155 156 struct { 157 int dot_limit; 158 int p2_slow, p2_fast; 159 } p2; 160 }; 161 162 /* returns HPLL frequency in kHz */ 163 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 164 { 165 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 166 167 /* Obtain SKU information */ 168 mutex_lock(&dev_priv->sb_lock); 169 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 170 CCK_FUSE_HPLL_FREQ_MASK; 171 mutex_unlock(&dev_priv->sb_lock); 172 173 return vco_freq[hpll_freq] * 1000; 174 } 175 176 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 177 const char *name, u32 reg, int ref_freq) 178 { 179 u32 val; 180 int divider; 181 182 mutex_lock(&dev_priv->sb_lock); 183 val = vlv_cck_read(dev_priv, reg); 184 mutex_unlock(&dev_priv->sb_lock); 185 186 divider = val & CCK_FREQUENCY_VALUES; 187 188 WARN((val & CCK_FREQUENCY_STATUS) != 189 (divider << CCK_FREQUENCY_STATUS_SHIFT), 190 "%s change in progress\n", name); 191 192 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 193 } 194 195 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 196 const char *name, u32 reg) 197 { 198 if (dev_priv->hpll_freq == 0) 199 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 200 201 return vlv_get_cck_clock(dev_priv, name, reg, 202 dev_priv->hpll_freq); 203 } 204 205 static void intel_update_czclk(struct drm_i915_private *dev_priv) 206 { 207 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 208 return; 209 210 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 211 CCK_CZ_CLOCK_CONTROL); 212 213 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 214 } 215 216 static inline u32 /* units of 100MHz */ 217 intel_fdi_link_freq(struct drm_i915_private *dev_priv, 218 const struct intel_crtc_state *pipe_config) 219 { 220 if (HAS_DDI(dev_priv)) 221 return pipe_config->port_clock; /* SPLL */ 222 else if (IS_GEN5(dev_priv)) 223 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000; 224 else 225 return 270000; 226 } 227 228 static const struct intel_limit intel_limits_i8xx_dac = { 229 .dot = { .min = 25000, .max = 350000 }, 230 .vco = { .min = 908000, .max = 1512000 }, 231 .n = { .min = 2, .max = 16 }, 232 .m = { .min = 96, .max = 140 }, 233 .m1 = { .min = 18, .max = 26 }, 234 .m2 = { .min = 6, .max = 16 }, 235 .p = { .min = 4, .max = 128 }, 236 .p1 = { .min = 2, .max = 33 }, 237 .p2 = { .dot_limit = 165000, 238 .p2_slow = 4, .p2_fast = 2 }, 239 }; 240 241 static const struct intel_limit intel_limits_i8xx_dvo = { 242 .dot = { .min = 25000, .max = 350000 }, 243 .vco = { .min = 908000, .max = 1512000 }, 244 .n = { .min = 2, .max = 16 }, 245 .m = { .min = 96, .max = 140 }, 246 .m1 = { .min = 18, .max = 26 }, 247 .m2 = { .min = 6, .max = 16 }, 248 .p = { .min = 4, .max = 128 }, 249 .p1 = { .min = 2, .max = 33 }, 250 .p2 = { .dot_limit = 165000, 251 .p2_slow = 4, .p2_fast = 4 }, 252 }; 253 254 static const struct intel_limit intel_limits_i8xx_lvds = { 255 .dot = { .min = 25000, .max = 350000 }, 256 .vco = { .min = 908000, .max = 1512000 }, 257 .n = { .min = 2, .max = 16 }, 258 .m = { .min = 96, .max = 140 }, 259 .m1 = { .min = 18, .max = 26 }, 260 .m2 = { .min = 6, .max = 16 }, 261 .p = { .min = 4, .max = 128 }, 262 .p1 = { .min = 1, .max = 6 }, 263 .p2 = { .dot_limit = 165000, 264 .p2_slow = 14, .p2_fast = 7 }, 265 }; 266 267 static const struct intel_limit intel_limits_i9xx_sdvo = { 268 .dot = { .min = 20000, .max = 400000 }, 269 .vco = { .min = 1400000, .max = 2800000 }, 270 .n = { .min = 1, .max = 6 }, 271 .m = { .min = 70, .max = 120 }, 272 .m1 = { .min = 8, .max = 18 }, 273 .m2 = { .min = 3, .max = 7 }, 274 .p = { .min = 5, .max = 80 }, 275 .p1 = { .min = 1, .max = 8 }, 276 .p2 = { .dot_limit = 200000, 277 .p2_slow = 10, .p2_fast = 5 }, 278 }; 279 280 static const struct intel_limit intel_limits_i9xx_lvds = { 281 .dot = { .min = 20000, .max = 400000 }, 282 .vco = { .min = 1400000, .max = 2800000 }, 283 .n = { .min = 1, .max = 6 }, 284 .m = { .min = 70, .max = 120 }, 285 .m1 = { .min = 8, .max = 18 }, 286 .m2 = { .min = 3, .max = 7 }, 287 .p = { .min = 7, .max = 98 }, 288 .p1 = { .min = 1, .max = 8 }, 289 .p2 = { .dot_limit = 112000, 290 .p2_slow = 14, .p2_fast = 7 }, 291 }; 292 293 294 static const struct intel_limit intel_limits_g4x_sdvo = { 295 .dot = { .min = 25000, .max = 270000 }, 296 .vco = { .min = 1750000, .max = 3500000}, 297 .n = { .min = 1, .max = 4 }, 298 .m = { .min = 104, .max = 138 }, 299 .m1 = { .min = 17, .max = 23 }, 300 .m2 = { .min = 5, .max = 11 }, 301 .p = { .min = 10, .max = 30 }, 302 .p1 = { .min = 1, .max = 3}, 303 .p2 = { .dot_limit = 270000, 304 .p2_slow = 10, 305 .p2_fast = 10 306 }, 307 }; 308 309 static const struct intel_limit intel_limits_g4x_hdmi = { 310 .dot = { .min = 22000, .max = 400000 }, 311 .vco = { .min = 1750000, .max = 3500000}, 312 .n = { .min = 1, .max = 4 }, 313 .m = { .min = 104, .max = 138 }, 314 .m1 = { .min = 16, .max = 23 }, 315 .m2 = { .min = 5, .max = 11 }, 316 .p = { .min = 5, .max = 80 }, 317 .p1 = { .min = 1, .max = 8}, 318 .p2 = { .dot_limit = 165000, 319 .p2_slow = 10, .p2_fast = 5 }, 320 }; 321 322 static const struct intel_limit intel_limits_g4x_single_channel_lvds = { 323 .dot = { .min = 20000, .max = 115000 }, 324 .vco = { .min = 1750000, .max = 3500000 }, 325 .n = { .min = 1, .max = 3 }, 326 .m = { .min = 104, .max = 138 }, 327 .m1 = { .min = 17, .max = 23 }, 328 .m2 = { .min = 5, .max = 11 }, 329 .p = { .min = 28, .max = 112 }, 330 .p1 = { .min = 2, .max = 8 }, 331 .p2 = { .dot_limit = 0, 332 .p2_slow = 14, .p2_fast = 14 333 }, 334 }; 335 336 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { 337 .dot = { .min = 80000, .max = 224000 }, 338 .vco = { .min = 1750000, .max = 3500000 }, 339 .n = { .min = 1, .max = 3 }, 340 .m = { .min = 104, .max = 138 }, 341 .m1 = { .min = 17, .max = 23 }, 342 .m2 = { .min = 5, .max = 11 }, 343 .p = { .min = 14, .max = 42 }, 344 .p1 = { .min = 2, .max = 6 }, 345 .p2 = { .dot_limit = 0, 346 .p2_slow = 7, .p2_fast = 7 347 }, 348 }; 349 350 static const struct intel_limit intel_limits_pineview_sdvo = { 351 .dot = { .min = 20000, .max = 400000}, 352 .vco = { .min = 1700000, .max = 3500000 }, 353 /* Pineview's Ncounter is a ring counter */ 354 .n = { .min = 3, .max = 6 }, 355 .m = { .min = 2, .max = 256 }, 356 /* Pineview only has one combined m divider, which we treat as m2. */ 357 .m1 = { .min = 0, .max = 0 }, 358 .m2 = { .min = 0, .max = 254 }, 359 .p = { .min = 5, .max = 80 }, 360 .p1 = { .min = 1, .max = 8 }, 361 .p2 = { .dot_limit = 200000, 362 .p2_slow = 10, .p2_fast = 5 }, 363 }; 364 365 static const struct intel_limit intel_limits_pineview_lvds = { 366 .dot = { .min = 20000, .max = 400000 }, 367 .vco = { .min = 1700000, .max = 3500000 }, 368 .n = { .min = 3, .max = 6 }, 369 .m = { .min = 2, .max = 256 }, 370 .m1 = { .min = 0, .max = 0 }, 371 .m2 = { .min = 0, .max = 254 }, 372 .p = { .min = 7, .max = 112 }, 373 .p1 = { .min = 1, .max = 8 }, 374 .p2 = { .dot_limit = 112000, 375 .p2_slow = 14, .p2_fast = 14 }, 376 }; 377 378 /* Ironlake / Sandybridge 379 * 380 * We calculate clock using (register_value + 2) for N/M1/M2, so here 381 * the range value for them is (actual_value - 2). 382 */ 383 static const struct intel_limit intel_limits_ironlake_dac = { 384 .dot = { .min = 25000, .max = 350000 }, 385 .vco = { .min = 1760000, .max = 3510000 }, 386 .n = { .min = 1, .max = 5 }, 387 .m = { .min = 79, .max = 127 }, 388 .m1 = { .min = 12, .max = 22 }, 389 .m2 = { .min = 5, .max = 9 }, 390 .p = { .min = 5, .max = 80 }, 391 .p1 = { .min = 1, .max = 8 }, 392 .p2 = { .dot_limit = 225000, 393 .p2_slow = 10, .p2_fast = 5 }, 394 }; 395 396 static const struct intel_limit intel_limits_ironlake_single_lvds = { 397 .dot = { .min = 25000, .max = 350000 }, 398 .vco = { .min = 1760000, .max = 3510000 }, 399 .n = { .min = 1, .max = 3 }, 400 .m = { .min = 79, .max = 118 }, 401 .m1 = { .min = 12, .max = 22 }, 402 .m2 = { .min = 5, .max = 9 }, 403 .p = { .min = 28, .max = 112 }, 404 .p1 = { .min = 2, .max = 8 }, 405 .p2 = { .dot_limit = 225000, 406 .p2_slow = 14, .p2_fast = 14 }, 407 }; 408 409 static const struct intel_limit intel_limits_ironlake_dual_lvds = { 410 .dot = { .min = 25000, .max = 350000 }, 411 .vco = { .min = 1760000, .max = 3510000 }, 412 .n = { .min = 1, .max = 3 }, 413 .m = { .min = 79, .max = 127 }, 414 .m1 = { .min = 12, .max = 22 }, 415 .m2 = { .min = 5, .max = 9 }, 416 .p = { .min = 14, .max = 56 }, 417 .p1 = { .min = 2, .max = 8 }, 418 .p2 = { .dot_limit = 225000, 419 .p2_slow = 7, .p2_fast = 7 }, 420 }; 421 422 /* LVDS 100mhz refclk limits. */ 423 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { 424 .dot = { .min = 25000, .max = 350000 }, 425 .vco = { .min = 1760000, .max = 3510000 }, 426 .n = { .min = 1, .max = 2 }, 427 .m = { .min = 79, .max = 126 }, 428 .m1 = { .min = 12, .max = 22 }, 429 .m2 = { .min = 5, .max = 9 }, 430 .p = { .min = 28, .max = 112 }, 431 .p1 = { .min = 2, .max = 8 }, 432 .p2 = { .dot_limit = 225000, 433 .p2_slow = 14, .p2_fast = 14 }, 434 }; 435 436 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { 437 .dot = { .min = 25000, .max = 350000 }, 438 .vco = { .min = 1760000, .max = 3510000 }, 439 .n = { .min = 1, .max = 3 }, 440 .m = { .min = 79, .max = 126 }, 441 .m1 = { .min = 12, .max = 22 }, 442 .m2 = { .min = 5, .max = 9 }, 443 .p = { .min = 14, .max = 42 }, 444 .p1 = { .min = 2, .max = 6 }, 445 .p2 = { .dot_limit = 225000, 446 .p2_slow = 7, .p2_fast = 7 }, 447 }; 448 449 static const struct intel_limit intel_limits_vlv = { 450 /* 451 * These are the data rate limits (measured in fast clocks) 452 * since those are the strictest limits we have. The fast 453 * clock and actual rate limits are more relaxed, so checking 454 * them would make no difference. 455 */ 456 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 457 .vco = { .min = 4000000, .max = 6000000 }, 458 .n = { .min = 1, .max = 7 }, 459 .m1 = { .min = 2, .max = 3 }, 460 .m2 = { .min = 11, .max = 156 }, 461 .p1 = { .min = 2, .max = 3 }, 462 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 463 }; 464 465 static const struct intel_limit intel_limits_chv = { 466 /* 467 * These are the data rate limits (measured in fast clocks) 468 * since those are the strictest limits we have. The fast 469 * clock and actual rate limits are more relaxed, so checking 470 * them would make no difference. 471 */ 472 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 473 .vco = { .min = 4800000, .max = 6480000 }, 474 .n = { .min = 1, .max = 1 }, 475 .m1 = { .min = 2, .max = 2 }, 476 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 477 .p1 = { .min = 2, .max = 4 }, 478 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 479 }; 480 481 static const struct intel_limit intel_limits_bxt = { 482 /* FIXME: find real dot limits */ 483 .dot = { .min = 0, .max = INT_MAX }, 484 .vco = { .min = 4800000, .max = 6700000 }, 485 .n = { .min = 1, .max = 1 }, 486 .m1 = { .min = 2, .max = 2 }, 487 /* FIXME: find real m2 limits */ 488 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 489 .p1 = { .min = 2, .max = 4 }, 490 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 491 }; 492 493 static bool 494 needs_modeset(struct drm_crtc_state *state) 495 { 496 return drm_atomic_crtc_needs_modeset(state); 497 } 498 499 /* 500 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 501 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 502 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 503 * The helpers' return value is the rate of the clock that is fed to the 504 * display engine's pipe which can be the above fast dot clock rate or a 505 * divided-down version of it. 506 */ 507 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 508 static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 509 { 510 clock->m = clock->m2 + 2; 511 clock->p = clock->p1 * clock->p2; 512 if (WARN_ON(clock->n == 0 || clock->p == 0)) 513 return 0; 514 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 515 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 516 517 return clock->dot; 518 } 519 520 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 521 { 522 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 523 } 524 525 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) 526 { 527 clock->m = i9xx_dpll_compute_m(clock); 528 clock->p = clock->p1 * clock->p2; 529 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 530 return 0; 531 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 532 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 533 534 return clock->dot; 535 } 536 537 static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 538 { 539 clock->m = clock->m1 * clock->m2; 540 clock->p = clock->p1 * clock->p2; 541 if (WARN_ON(clock->n == 0 || clock->p == 0)) 542 return 0; 543 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 544 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 545 546 return clock->dot / 5; 547 } 548 549 int chv_calc_dpll_params(int refclk, struct dpll *clock) 550 { 551 clock->m = clock->m1 * clock->m2; 552 clock->p = clock->p1 * clock->p2; 553 if (WARN_ON(clock->n == 0 || clock->p == 0)) 554 return 0; 555 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 556 clock->n << 22); 557 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 558 559 return clock->dot / 5; 560 } 561 562 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 563 /** 564 * Returns whether the given set of divisors are valid for a given refclk with 565 * the given connectors. 566 */ 567 568 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 569 const struct intel_limit *limit, 570 const struct dpll *clock) 571 { 572 if (clock->n < limit->n.min || limit->n.max < clock->n) 573 INTELPllInvalid("n out of range\n"); 574 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 575 INTELPllInvalid("p1 out of range\n"); 576 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 577 INTELPllInvalid("m2 out of range\n"); 578 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 579 INTELPllInvalid("m1 out of range\n"); 580 581 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && 582 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) 583 if (clock->m1 <= clock->m2) 584 INTELPllInvalid("m1 <= m2\n"); 585 586 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 587 !IS_GEN9_LP(dev_priv)) { 588 if (clock->p < limit->p.min || limit->p.max < clock->p) 589 INTELPllInvalid("p out of range\n"); 590 if (clock->m < limit->m.min || limit->m.max < clock->m) 591 INTELPllInvalid("m out of range\n"); 592 } 593 594 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 595 INTELPllInvalid("vco out of range\n"); 596 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 597 * connector, etc., rather than just a single range. 598 */ 599 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 600 INTELPllInvalid("dot out of range\n"); 601 602 return true; 603 } 604 605 static int 606 i9xx_select_p2_div(const struct intel_limit *limit, 607 const struct intel_crtc_state *crtc_state, 608 int target) 609 { 610 struct drm_device *dev = crtc_state->base.crtc->dev; 611 612 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 613 /* 614 * For LVDS just rely on its current settings for dual-channel. 615 * We haven't figured out how to reliably set up different 616 * single/dual channel state, if we even can. 617 */ 618 if (intel_is_dual_link_lvds(dev)) 619 return limit->p2.p2_fast; 620 else 621 return limit->p2.p2_slow; 622 } else { 623 if (target < limit->p2.dot_limit) 624 return limit->p2.p2_slow; 625 else 626 return limit->p2.p2_fast; 627 } 628 } 629 630 /* 631 * Returns a set of divisors for the desired target clock with the given 632 * refclk, or FALSE. The returned values represent the clock equation: 633 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 634 * 635 * Target and reference clocks are specified in kHz. 636 * 637 * If match_clock is provided, then best_clock P divider must match the P 638 * divider from @match_clock used for LVDS downclocking. 639 */ 640 static bool 641 i9xx_find_best_dpll(const struct intel_limit *limit, 642 struct intel_crtc_state *crtc_state, 643 int target, int refclk, struct dpll *match_clock, 644 struct dpll *best_clock) 645 { 646 struct drm_device *dev = crtc_state->base.crtc->dev; 647 struct dpll clock; 648 int err = target; 649 650 memset(best_clock, 0, sizeof(*best_clock)); 651 652 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 653 654 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 655 clock.m1++) { 656 for (clock.m2 = limit->m2.min; 657 clock.m2 <= limit->m2.max; clock.m2++) { 658 if (clock.m2 >= clock.m1) 659 break; 660 for (clock.n = limit->n.min; 661 clock.n <= limit->n.max; clock.n++) { 662 for (clock.p1 = limit->p1.min; 663 clock.p1 <= limit->p1.max; clock.p1++) { 664 int this_err; 665 666 i9xx_calc_dpll_params(refclk, &clock); 667 if (!intel_PLL_is_valid(to_i915(dev), 668 limit, 669 &clock)) 670 continue; 671 if (match_clock && 672 clock.p != match_clock->p) 673 continue; 674 675 this_err = abs(clock.dot - target); 676 if (this_err < err) { 677 *best_clock = clock; 678 err = this_err; 679 } 680 } 681 } 682 } 683 } 684 685 return (err != target); 686 } 687 688 /* 689 * Returns a set of divisors for the desired target clock with the given 690 * refclk, or FALSE. The returned values represent the clock equation: 691 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 692 * 693 * Target and reference clocks are specified in kHz. 694 * 695 * If match_clock is provided, then best_clock P divider must match the P 696 * divider from @match_clock used for LVDS downclocking. 697 */ 698 static bool 699 pnv_find_best_dpll(const struct intel_limit *limit, 700 struct intel_crtc_state *crtc_state, 701 int target, int refclk, struct dpll *match_clock, 702 struct dpll *best_clock) 703 { 704 struct drm_device *dev = crtc_state->base.crtc->dev; 705 struct dpll clock; 706 int err = target; 707 708 memset(best_clock, 0, sizeof(*best_clock)); 709 710 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 711 712 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 713 clock.m1++) { 714 for (clock.m2 = limit->m2.min; 715 clock.m2 <= limit->m2.max; clock.m2++) { 716 for (clock.n = limit->n.min; 717 clock.n <= limit->n.max; clock.n++) { 718 for (clock.p1 = limit->p1.min; 719 clock.p1 <= limit->p1.max; clock.p1++) { 720 int this_err; 721 722 pnv_calc_dpll_params(refclk, &clock); 723 if (!intel_PLL_is_valid(to_i915(dev), 724 limit, 725 &clock)) 726 continue; 727 if (match_clock && 728 clock.p != match_clock->p) 729 continue; 730 731 this_err = abs(clock.dot - target); 732 if (this_err < err) { 733 *best_clock = clock; 734 err = this_err; 735 } 736 } 737 } 738 } 739 } 740 741 return (err != target); 742 } 743 744 /* 745 * Returns a set of divisors for the desired target clock with the given 746 * refclk, or FALSE. The returned values represent the clock equation: 747 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 748 * 749 * Target and reference clocks are specified in kHz. 750 * 751 * If match_clock is provided, then best_clock P divider must match the P 752 * divider from @match_clock used for LVDS downclocking. 753 */ 754 static bool 755 g4x_find_best_dpll(const struct intel_limit *limit, 756 struct intel_crtc_state *crtc_state, 757 int target, int refclk, struct dpll *match_clock, 758 struct dpll *best_clock) 759 { 760 struct drm_device *dev = crtc_state->base.crtc->dev; 761 struct dpll clock; 762 int max_n; 763 bool found = false; 764 /* approximately equals target * 0.00585 */ 765 int err_most = (target >> 8) + (target >> 9); 766 767 memset(best_clock, 0, sizeof(*best_clock)); 768 769 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 770 771 max_n = limit->n.max; 772 /* based on hardware requirement, prefer smaller n to precision */ 773 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 774 /* based on hardware requirement, prefere larger m1,m2 */ 775 for (clock.m1 = limit->m1.max; 776 clock.m1 >= limit->m1.min; clock.m1--) { 777 for (clock.m2 = limit->m2.max; 778 clock.m2 >= limit->m2.min; clock.m2--) { 779 for (clock.p1 = limit->p1.max; 780 clock.p1 >= limit->p1.min; clock.p1--) { 781 int this_err; 782 783 i9xx_calc_dpll_params(refclk, &clock); 784 if (!intel_PLL_is_valid(to_i915(dev), 785 limit, 786 &clock)) 787 continue; 788 789 this_err = abs(clock.dot - target); 790 if (this_err < err_most) { 791 *best_clock = clock; 792 err_most = this_err; 793 max_n = clock.n; 794 found = true; 795 } 796 } 797 } 798 } 799 } 800 return found; 801 } 802 803 /* 804 * Check if the calculated PLL configuration is more optimal compared to the 805 * best configuration and error found so far. Return the calculated error. 806 */ 807 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 808 const struct dpll *calculated_clock, 809 const struct dpll *best_clock, 810 unsigned int best_error_ppm, 811 unsigned int *error_ppm) 812 { 813 /* 814 * For CHV ignore the error and consider only the P value. 815 * Prefer a bigger P value based on HW requirements. 816 */ 817 if (IS_CHERRYVIEW(to_i915(dev))) { 818 *error_ppm = 0; 819 820 return calculated_clock->p > best_clock->p; 821 } 822 823 if (WARN_ON_ONCE(!target_freq)) 824 return false; 825 826 *error_ppm = div_u64(1000000ULL * 827 abs(target_freq - calculated_clock->dot), 828 target_freq); 829 /* 830 * Prefer a better P value over a better (smaller) error if the error 831 * is small. Ensure this preference for future configurations too by 832 * setting the error to 0. 833 */ 834 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 835 *error_ppm = 0; 836 837 return true; 838 } 839 840 return *error_ppm + 10 < best_error_ppm; 841 } 842 843 /* 844 * Returns a set of divisors for the desired target clock with the given 845 * refclk, or FALSE. The returned values represent the clock equation: 846 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 847 */ 848 static bool 849 vlv_find_best_dpll(const struct intel_limit *limit, 850 struct intel_crtc_state *crtc_state, 851 int target, int refclk, struct dpll *match_clock, 852 struct dpll *best_clock) 853 { 854 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 855 struct drm_device *dev = crtc->base.dev; 856 struct dpll clock; 857 unsigned int bestppm = 1000000; 858 /* min update 19.2 MHz */ 859 int max_n = min(limit->n.max, refclk / 19200); 860 bool found = false; 861 862 target *= 5; /* fast clock */ 863 864 memset(best_clock, 0, sizeof(*best_clock)); 865 866 /* based on hardware requirement, prefer smaller n to precision */ 867 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 868 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 869 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 870 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 871 clock.p = clock.p1 * clock.p2; 872 /* based on hardware requirement, prefer bigger m1,m2 values */ 873 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 874 unsigned int ppm; 875 876 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 877 refclk * clock.m1); 878 879 vlv_calc_dpll_params(refclk, &clock); 880 881 if (!intel_PLL_is_valid(to_i915(dev), 882 limit, 883 &clock)) 884 continue; 885 886 if (!vlv_PLL_is_optimal(dev, target, 887 &clock, 888 best_clock, 889 bestppm, &ppm)) 890 continue; 891 892 *best_clock = clock; 893 bestppm = ppm; 894 found = true; 895 } 896 } 897 } 898 } 899 900 return found; 901 } 902 903 /* 904 * Returns a set of divisors for the desired target clock with the given 905 * refclk, or FALSE. The returned values represent the clock equation: 906 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 907 */ 908 static bool 909 chv_find_best_dpll(const struct intel_limit *limit, 910 struct intel_crtc_state *crtc_state, 911 int target, int refclk, struct dpll *match_clock, 912 struct dpll *best_clock) 913 { 914 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 915 struct drm_device *dev = crtc->base.dev; 916 unsigned int best_error_ppm; 917 struct dpll clock; 918 uint64_t m2; 919 int found = false; 920 921 memset(best_clock, 0, sizeof(*best_clock)); 922 best_error_ppm = 1000000; 923 924 /* 925 * Based on hardware doc, the n always set to 1, and m1 always 926 * set to 2. If requires to support 200Mhz refclk, we need to 927 * revisit this because n may not 1 anymore. 928 */ 929 clock.n = 1, clock.m1 = 2; 930 target *= 5; /* fast clock */ 931 932 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 933 for (clock.p2 = limit->p2.p2_fast; 934 clock.p2 >= limit->p2.p2_slow; 935 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 936 unsigned int error_ppm; 937 938 clock.p = clock.p1 * clock.p2; 939 940 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 941 clock.n) << 22, refclk * clock.m1); 942 943 if (m2 > INT_MAX/clock.m1) 944 continue; 945 946 clock.m2 = m2; 947 948 chv_calc_dpll_params(refclk, &clock); 949 950 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) 951 continue; 952 953 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 954 best_error_ppm, &error_ppm)) 955 continue; 956 957 *best_clock = clock; 958 best_error_ppm = error_ppm; 959 found = true; 960 } 961 } 962 963 return found; 964 } 965 966 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 967 struct dpll *best_clock) 968 { 969 int refclk = 100000; 970 const struct intel_limit *limit = &intel_limits_bxt; 971 972 return chv_find_best_dpll(limit, crtc_state, 973 target_clock, refclk, NULL, best_clock); 974 } 975 976 bool intel_crtc_active(struct intel_crtc *crtc) 977 { 978 /* Be paranoid as we can arrive here with only partial 979 * state retrieved from the hardware during setup. 980 * 981 * We can ditch the adjusted_mode.crtc_clock check as soon 982 * as Haswell has gained clock readout/fastboot support. 983 * 984 * We can ditch the crtc->primary->fb check as soon as we can 985 * properly reconstruct framebuffers. 986 * 987 * FIXME: The intel_crtc->active here should be switched to 988 * crtc->state->active once we have proper CRTC states wired up 989 * for atomic. 990 */ 991 return crtc->active && crtc->base.primary->state->fb && 992 crtc->config->base.adjusted_mode.crtc_clock; 993 } 994 995 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 996 enum i915_pipe pipe) 997 { 998 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 999 1000 return crtc->config->cpu_transcoder; 1001 } 1002 1003 static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, 1004 enum i915_pipe pipe) 1005 { 1006 i915_reg_t reg = PIPEDSL(pipe); 1007 u32 line1, line2; 1008 u32 line_mask; 1009 1010 if (IS_GEN2(dev_priv)) 1011 line_mask = DSL_LINEMASK_GEN2; 1012 else 1013 line_mask = DSL_LINEMASK_GEN3; 1014 1015 line1 = I915_READ(reg) & line_mask; 1016 msleep(5); 1017 line2 = I915_READ(reg) & line_mask; 1018 1019 return line1 != line2; 1020 } 1021 1022 static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) 1023 { 1024 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1025 enum i915_pipe pipe = crtc->pipe; 1026 1027 /* Wait for the display line to settle/start moving */ 1028 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100)) 1029 DRM_ERROR("pipe %c scanline %s wait timed out\n", 1030 pipe_name(pipe), onoff(state)); 1031 } 1032 1033 static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc) 1034 { 1035 wait_for_pipe_scanline_moving(crtc, false); 1036 } 1037 1038 static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc) 1039 { 1040 wait_for_pipe_scanline_moving(crtc, true); 1041 } 1042 1043 /* 1044 * intel_wait_for_pipe_off - wait for pipe to turn off 1045 * @crtc: crtc whose pipe to wait for 1046 * 1047 * After disabling a pipe, we can't wait for vblank in the usual way, 1048 * spinning on the vblank interrupt status bit, since we won't actually 1049 * see an interrupt when the pipe is disabled. 1050 * 1051 * On Gen4 and above: 1052 * wait for the pipe register state bit to turn off 1053 * 1054 * Otherwise: 1055 * wait for the display line value to settle (it usually 1056 * ends up stopping at the start of the next frame). 1057 * 1058 */ 1059 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1060 { 1061 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1062 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1063 1064 if (INTEL_GEN(dev_priv) >= 4) { 1065 i915_reg_t reg = PIPECONF(cpu_transcoder); 1066 1067 /* Wait for the Pipe State to go off */ 1068 if (intel_wait_for_register(dev_priv, 1069 reg, I965_PIPECONF_ACTIVE, 0, 1070 100)) 1071 WARN(1, "pipe_off wait timed out\n"); 1072 } else { 1073 intel_wait_for_pipe_scanline_stopped(crtc); 1074 } 1075 } 1076 1077 /* Only for pre-ILK configs */ 1078 void assert_pll(struct drm_i915_private *dev_priv, 1079 enum i915_pipe pipe, bool state) 1080 { 1081 u32 val; 1082 bool cur_state; 1083 1084 val = I915_READ(DPLL(pipe)); 1085 cur_state = !!(val & DPLL_VCO_ENABLE); 1086 I915_STATE_WARN(cur_state != state, 1087 "PLL state assertion failure (expected %s, current %s)\n", 1088 onoff(state), onoff(cur_state)); 1089 } 1090 1091 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1092 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1093 { 1094 u32 val; 1095 bool cur_state; 1096 1097 mutex_lock(&dev_priv->sb_lock); 1098 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1099 mutex_unlock(&dev_priv->sb_lock); 1100 1101 cur_state = val & DSI_PLL_VCO_EN; 1102 I915_STATE_WARN(cur_state != state, 1103 "DSI PLL state assertion failure (expected %s, current %s)\n", 1104 onoff(state), onoff(cur_state)); 1105 } 1106 1107 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1108 enum i915_pipe pipe, bool state) 1109 { 1110 bool cur_state; 1111 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1112 pipe); 1113 1114 if (HAS_DDI(dev_priv)) { 1115 /* DDI does not have a specific FDI_TX register */ 1116 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1117 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1118 } else { 1119 u32 val = I915_READ(FDI_TX_CTL(pipe)); 1120 cur_state = !!(val & FDI_TX_ENABLE); 1121 } 1122 I915_STATE_WARN(cur_state != state, 1123 "FDI TX state assertion failure (expected %s, current %s)\n", 1124 onoff(state), onoff(cur_state)); 1125 } 1126 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1127 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1128 1129 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1130 enum i915_pipe pipe, bool state) 1131 { 1132 u32 val; 1133 bool cur_state; 1134 1135 val = I915_READ(FDI_RX_CTL(pipe)); 1136 cur_state = !!(val & FDI_RX_ENABLE); 1137 I915_STATE_WARN(cur_state != state, 1138 "FDI RX state assertion failure (expected %s, current %s)\n", 1139 onoff(state), onoff(cur_state)); 1140 } 1141 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1142 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1143 1144 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1145 enum i915_pipe pipe) 1146 { 1147 u32 val; 1148 1149 /* ILK FDI PLL is always enabled */ 1150 if (IS_GEN5(dev_priv)) 1151 return; 1152 1153 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1154 if (HAS_DDI(dev_priv)) 1155 return; 1156 1157 val = I915_READ(FDI_TX_CTL(pipe)); 1158 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1159 } 1160 1161 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1162 enum i915_pipe pipe, bool state) 1163 { 1164 u32 val; 1165 bool cur_state; 1166 1167 val = I915_READ(FDI_RX_CTL(pipe)); 1168 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1169 I915_STATE_WARN(cur_state != state, 1170 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1171 onoff(state), onoff(cur_state)); 1172 } 1173 1174 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1175 { 1176 i915_reg_t pp_reg; 1177 u32 val; 1178 enum i915_pipe panel_pipe = PIPE_A; 1179 bool locked = true; 1180 1181 if (WARN_ON(HAS_DDI(dev_priv))) 1182 return; 1183 1184 if (HAS_PCH_SPLIT(dev_priv)) { 1185 u32 port_sel; 1186 1187 pp_reg = PP_CONTROL(0); 1188 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; 1189 1190 if (port_sel == PANEL_PORT_SELECT_LVDS && 1191 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1192 panel_pipe = PIPE_B; 1193 /* XXX: else fix for eDP */ 1194 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1195 /* presumably write lock depends on pipe, not port select */ 1196 pp_reg = PP_CONTROL(pipe); 1197 panel_pipe = pipe; 1198 } else { 1199 pp_reg = PP_CONTROL(0); 1200 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1201 panel_pipe = PIPE_B; 1202 } 1203 1204 val = I915_READ(pp_reg); 1205 if (!(val & PANEL_POWER_ON) || 1206 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1207 locked = false; 1208 1209 I915_STATE_WARN(panel_pipe == pipe && locked, 1210 "panel assertion failure, pipe %c regs locked\n", 1211 pipe_name(pipe)); 1212 } 1213 1214 void assert_pipe(struct drm_i915_private *dev_priv, 1215 enum i915_pipe pipe, bool state) 1216 { 1217 bool cur_state; 1218 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1219 pipe); 1220 enum intel_display_power_domain power_domain; 1221 1222 /* we keep both pipes enabled on 830 */ 1223 if (IS_I830(dev_priv)) 1224 state = true; 1225 1226 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 1227 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 1228 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1229 cur_state = !!(val & PIPECONF_ENABLE); 1230 1231 intel_display_power_put(dev_priv, power_domain); 1232 } else { 1233 cur_state = false; 1234 } 1235 1236 I915_STATE_WARN(cur_state != state, 1237 "pipe %c assertion failure (expected %s, current %s)\n", 1238 pipe_name(pipe), onoff(state), onoff(cur_state)); 1239 } 1240 1241 static void assert_plane(struct intel_plane *plane, bool state) 1242 { 1243 bool cur_state = plane->get_hw_state(plane); 1244 1245 I915_STATE_WARN(cur_state != state, 1246 "%s assertion failure (expected %s, current %s)\n", 1247 plane->base.name, onoff(state), onoff(cur_state)); 1248 } 1249 1250 #define assert_plane_enabled(p) assert_plane(p, true) 1251 #define assert_plane_disabled(p) assert_plane(p, false) 1252 1253 static void assert_planes_disabled(struct intel_crtc *crtc) 1254 { 1255 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1256 struct intel_plane *plane; 1257 1258 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) 1259 assert_plane_disabled(plane); 1260 } 1261 1262 static void assert_vblank_disabled(struct drm_crtc *crtc) 1263 { 1264 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1265 drm_crtc_vblank_put(crtc); 1266 } 1267 1268 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1269 enum i915_pipe pipe) 1270 { 1271 u32 val; 1272 bool enabled; 1273 1274 val = I915_READ(PCH_TRANSCONF(pipe)); 1275 enabled = !!(val & TRANS_ENABLE); 1276 I915_STATE_WARN(enabled, 1277 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1278 pipe_name(pipe)); 1279 } 1280 1281 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1282 enum i915_pipe pipe, u32 port_sel, u32 val) 1283 { 1284 if ((val & DP_PORT_EN) == 0) 1285 return false; 1286 1287 if (HAS_PCH_CPT(dev_priv)) { 1288 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); 1289 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1290 return false; 1291 } else if (IS_CHERRYVIEW(dev_priv)) { 1292 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1293 return false; 1294 } else { 1295 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1296 return false; 1297 } 1298 return true; 1299 } 1300 1301 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1302 enum i915_pipe pipe, u32 val) 1303 { 1304 if ((val & SDVO_ENABLE) == 0) 1305 return false; 1306 1307 if (HAS_PCH_CPT(dev_priv)) { 1308 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1309 return false; 1310 } else if (IS_CHERRYVIEW(dev_priv)) { 1311 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1312 return false; 1313 } else { 1314 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1315 return false; 1316 } 1317 return true; 1318 } 1319 1320 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1321 enum i915_pipe pipe, u32 val) 1322 { 1323 if ((val & LVDS_PORT_EN) == 0) 1324 return false; 1325 1326 if (HAS_PCH_CPT(dev_priv)) { 1327 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1328 return false; 1329 } else { 1330 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1331 return false; 1332 } 1333 return true; 1334 } 1335 1336 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1337 enum i915_pipe pipe, u32 val) 1338 { 1339 if ((val & ADPA_DAC_ENABLE) == 0) 1340 return false; 1341 if (HAS_PCH_CPT(dev_priv)) { 1342 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1343 return false; 1344 } else { 1345 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1346 return false; 1347 } 1348 return true; 1349 } 1350 1351 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1352 enum i915_pipe pipe, i915_reg_t reg, 1353 u32 port_sel) 1354 { 1355 u32 val = I915_READ(reg); 1356 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1357 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1358 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1359 1360 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 1361 && (val & DP_PIPEB_SELECT), 1362 "IBX PCH dp port still using transcoder B\n"); 1363 } 1364 1365 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1366 enum i915_pipe pipe, i915_reg_t reg) 1367 { 1368 u32 val = I915_READ(reg); 1369 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1370 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1371 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1372 1373 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 1374 && (val & SDVO_PIPE_B_SELECT), 1375 "IBX PCH hdmi port still using transcoder B\n"); 1376 } 1377 1378 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1379 enum i915_pipe pipe) 1380 { 1381 u32 val; 1382 1383 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1384 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1385 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1386 1387 val = I915_READ(PCH_ADPA); 1388 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1389 "PCH VGA enabled on transcoder %c, should be disabled\n", 1390 pipe_name(pipe)); 1391 1392 val = I915_READ(PCH_LVDS); 1393 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1394 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1395 pipe_name(pipe)); 1396 1397 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1398 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1399 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1400 } 1401 1402 static void _vlv_enable_pll(struct intel_crtc *crtc, 1403 const struct intel_crtc_state *pipe_config) 1404 { 1405 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1406 enum i915_pipe pipe = crtc->pipe; 1407 1408 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1409 POSTING_READ(DPLL(pipe)); 1410 udelay(150); 1411 1412 if (intel_wait_for_register(dev_priv, 1413 DPLL(pipe), 1414 DPLL_LOCK_VLV, 1415 DPLL_LOCK_VLV, 1416 1)) 1417 DRM_ERROR("DPLL %d failed to lock\n", pipe); 1418 } 1419 1420 static void vlv_enable_pll(struct intel_crtc *crtc, 1421 const struct intel_crtc_state *pipe_config) 1422 { 1423 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1424 enum i915_pipe pipe = crtc->pipe; 1425 1426 assert_pipe_disabled(dev_priv, pipe); 1427 1428 /* PLL is protected by panel, make sure we can write it */ 1429 assert_panel_unlocked(dev_priv, pipe); 1430 1431 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1432 _vlv_enable_pll(crtc, pipe_config); 1433 1434 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1435 POSTING_READ(DPLL_MD(pipe)); 1436 } 1437 1438 1439 static void _chv_enable_pll(struct intel_crtc *crtc, 1440 const struct intel_crtc_state *pipe_config) 1441 { 1442 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1443 enum i915_pipe pipe = crtc->pipe; 1444 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1445 u32 tmp; 1446 1447 mutex_lock(&dev_priv->sb_lock); 1448 1449 /* Enable back the 10bit clock to display controller */ 1450 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1451 tmp |= DPIO_DCLKP_EN; 1452 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1453 1454 mutex_unlock(&dev_priv->sb_lock); 1455 1456 /* 1457 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 1458 */ 1459 udelay(1); 1460 1461 /* Enable PLL */ 1462 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1463 1464 /* Check PLL is locked */ 1465 if (intel_wait_for_register(dev_priv, 1466 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, 1467 1)) 1468 DRM_ERROR("PLL %d failed to lock\n", pipe); 1469 } 1470 1471 static void chv_enable_pll(struct intel_crtc *crtc, 1472 const struct intel_crtc_state *pipe_config) 1473 { 1474 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1475 enum i915_pipe pipe = crtc->pipe; 1476 1477 assert_pipe_disabled(dev_priv, pipe); 1478 1479 /* PLL is protected by panel, make sure we can write it */ 1480 assert_panel_unlocked(dev_priv, pipe); 1481 1482 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1483 _chv_enable_pll(crtc, pipe_config); 1484 1485 if (pipe != PIPE_A) { 1486 /* 1487 * WaPixelRepeatModeFixForC0:chv 1488 * 1489 * DPLLCMD is AWOL. Use chicken bits to propagate 1490 * the value from DPLLBMD to either pipe B or C. 1491 */ 1492 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe)); 1493 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md); 1494 I915_WRITE(CBR4_VLV, 0); 1495 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; 1496 1497 /* 1498 * DPLLB VGA mode also seems to cause problems. 1499 * We should always have it disabled. 1500 */ 1501 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); 1502 } else { 1503 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1504 POSTING_READ(DPLL_MD(pipe)); 1505 } 1506 } 1507 1508 static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv) 1509 { 1510 struct intel_crtc *crtc; 1511 int count = 0; 1512 1513 for_each_intel_crtc(&dev_priv->drm, crtc) { 1514 count += crtc->base.state->active && 1515 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); 1516 } 1517 1518 return count; 1519 } 1520 1521 static void i9xx_enable_pll(struct intel_crtc *crtc, 1522 const struct intel_crtc_state *crtc_state) 1523 { 1524 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1525 i915_reg_t reg = DPLL(crtc->pipe); 1526 u32 dpll = crtc_state->dpll_hw_state.dpll; 1527 int i; 1528 1529 assert_pipe_disabled(dev_priv, crtc->pipe); 1530 1531 /* PLL is protected by panel, make sure we can write it */ 1532 if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) 1533 assert_panel_unlocked(dev_priv, crtc->pipe); 1534 1535 /* Enable DVO 2x clock on both PLLs if necessary */ 1536 if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) { 1537 /* 1538 * It appears to be important that we don't enable this 1539 * for the current pipe before otherwise configuring the 1540 * PLL. No idea how this should be handled if multiple 1541 * DVO outputs are enabled simultaneosly. 1542 */ 1543 dpll |= DPLL_DVO_2X_MODE; 1544 I915_WRITE(DPLL(!crtc->pipe), 1545 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1546 } 1547 1548 /* 1549 * Apparently we need to have VGA mode enabled prior to changing 1550 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 1551 * dividers, even though the register value does change. 1552 */ 1553 I915_WRITE(reg, 0); 1554 1555 I915_WRITE(reg, dpll); 1556 1557 /* Wait for the clocks to stabilize. */ 1558 POSTING_READ(reg); 1559 udelay(150); 1560 1561 if (INTEL_GEN(dev_priv) >= 4) { 1562 I915_WRITE(DPLL_MD(crtc->pipe), 1563 crtc_state->dpll_hw_state.dpll_md); 1564 } else { 1565 /* The pixel multiplier can only be updated once the 1566 * DPLL is enabled and the clocks are stable. 1567 * 1568 * So write it again. 1569 */ 1570 I915_WRITE(reg, dpll); 1571 } 1572 1573 /* We do this three times for luck */ 1574 for (i = 0; i < 3; i++) { 1575 I915_WRITE(reg, dpll); 1576 POSTING_READ(reg); 1577 udelay(150); /* wait for warmup */ 1578 } 1579 } 1580 1581 static void i9xx_disable_pll(struct intel_crtc *crtc) 1582 { 1583 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1584 enum i915_pipe pipe = crtc->pipe; 1585 1586 /* Disable DVO 2x clock on both PLLs if necessary */ 1587 if (IS_I830(dev_priv) && 1588 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && 1589 !intel_num_dvo_pipes(dev_priv)) { 1590 I915_WRITE(DPLL(PIPE_B), 1591 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1592 I915_WRITE(DPLL(PIPE_A), 1593 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1594 } 1595 1596 /* Don't disable pipe or pipe PLLs if needed */ 1597 if (IS_I830(dev_priv)) 1598 return; 1599 1600 /* Make sure the pipe isn't still relying on us */ 1601 assert_pipe_disabled(dev_priv, pipe); 1602 1603 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 1604 POSTING_READ(DPLL(pipe)); 1605 } 1606 1607 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1608 { 1609 u32 val; 1610 1611 /* Make sure the pipe isn't still relying on us */ 1612 assert_pipe_disabled(dev_priv, pipe); 1613 1614 val = DPLL_INTEGRATED_REF_CLK_VLV | 1615 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1616 if (pipe != PIPE_A) 1617 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1618 1619 I915_WRITE(DPLL(pipe), val); 1620 POSTING_READ(DPLL(pipe)); 1621 } 1622 1623 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1624 { 1625 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1626 u32 val; 1627 1628 /* Make sure the pipe isn't still relying on us */ 1629 assert_pipe_disabled(dev_priv, pipe); 1630 1631 val = DPLL_SSC_REF_CLK_CHV | 1632 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1633 if (pipe != PIPE_A) 1634 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1635 1636 I915_WRITE(DPLL(pipe), val); 1637 POSTING_READ(DPLL(pipe)); 1638 1639 mutex_lock(&dev_priv->sb_lock); 1640 1641 /* Disable 10bit clock to display controller */ 1642 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1643 val &= ~DPIO_DCLKP_EN; 1644 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1645 1646 mutex_unlock(&dev_priv->sb_lock); 1647 } 1648 1649 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1650 struct intel_digital_port *dport, 1651 unsigned int expected_mask) 1652 { 1653 u32 port_mask; 1654 i915_reg_t dpll_reg; 1655 1656 switch (dport->port) { 1657 case PORT_B: 1658 port_mask = DPLL_PORTB_READY_MASK; 1659 dpll_reg = DPLL(0); 1660 break; 1661 case PORT_C: 1662 port_mask = DPLL_PORTC_READY_MASK; 1663 dpll_reg = DPLL(0); 1664 expected_mask <<= 4; 1665 break; 1666 case PORT_D: 1667 port_mask = DPLL_PORTD_READY_MASK; 1668 dpll_reg = DPIO_PHY_STATUS; 1669 break; 1670 default: 1671 BUG(); 1672 } 1673 1674 if (intel_wait_for_register(dev_priv, 1675 dpll_reg, port_mask, expected_mask, 1676 1000)) 1677 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1678 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1679 } 1680 1681 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1682 enum i915_pipe pipe) 1683 { 1684 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 1685 pipe); 1686 i915_reg_t reg; 1687 uint32_t val, pipeconf_val; 1688 1689 /* Make sure PCH DPLL is enabled */ 1690 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); 1691 1692 /* FDI must be feeding us bits for PCH ports */ 1693 assert_fdi_tx_enabled(dev_priv, pipe); 1694 assert_fdi_rx_enabled(dev_priv, pipe); 1695 1696 if (HAS_PCH_CPT(dev_priv)) { 1697 /* Workaround: Set the timing override bit before enabling the 1698 * pch transcoder. */ 1699 reg = TRANS_CHICKEN2(pipe); 1700 val = I915_READ(reg); 1701 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1702 I915_WRITE(reg, val); 1703 } 1704 1705 reg = PCH_TRANSCONF(pipe); 1706 val = I915_READ(reg); 1707 pipeconf_val = I915_READ(PIPECONF(pipe)); 1708 1709 if (HAS_PCH_IBX(dev_priv)) { 1710 /* 1711 * Make the BPC in transcoder be consistent with 1712 * that in pipeconf reg. For HDMI we must use 8bpc 1713 * here for both 8bpc and 12bpc. 1714 */ 1715 val &= ~PIPECONF_BPC_MASK; 1716 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1717 val |= PIPECONF_8BPC; 1718 else 1719 val |= pipeconf_val & PIPECONF_BPC_MASK; 1720 } 1721 1722 val &= ~TRANS_INTERLACE_MASK; 1723 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1724 if (HAS_PCH_IBX(dev_priv) && 1725 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1726 val |= TRANS_LEGACY_INTERLACED_ILK; 1727 else 1728 val |= TRANS_INTERLACED; 1729 else 1730 val |= TRANS_PROGRESSIVE; 1731 1732 I915_WRITE(reg, val | TRANS_ENABLE); 1733 if (intel_wait_for_register(dev_priv, 1734 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 1735 100)) 1736 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1737 } 1738 1739 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1740 enum transcoder cpu_transcoder) 1741 { 1742 u32 val, pipeconf_val; 1743 1744 /* FDI must be feeding us bits for PCH ports */ 1745 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1746 assert_fdi_rx_enabled(dev_priv, PIPE_A); 1747 1748 /* Workaround: set timing override bit. */ 1749 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1750 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1751 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1752 1753 val = TRANS_ENABLE; 1754 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1755 1756 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1757 PIPECONF_INTERLACED_ILK) 1758 val |= TRANS_INTERLACED; 1759 else 1760 val |= TRANS_PROGRESSIVE; 1761 1762 I915_WRITE(LPT_TRANSCONF, val); 1763 if (intel_wait_for_register(dev_priv, 1764 LPT_TRANSCONF, 1765 TRANS_STATE_ENABLE, 1766 TRANS_STATE_ENABLE, 1767 100)) 1768 DRM_ERROR("Failed to enable PCH transcoder\n"); 1769 } 1770 1771 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1772 enum i915_pipe pipe) 1773 { 1774 i915_reg_t reg; 1775 uint32_t val; 1776 1777 /* FDI relies on the transcoder */ 1778 assert_fdi_tx_disabled(dev_priv, pipe); 1779 assert_fdi_rx_disabled(dev_priv, pipe); 1780 1781 /* Ports must be off as well */ 1782 assert_pch_ports_disabled(dev_priv, pipe); 1783 1784 reg = PCH_TRANSCONF(pipe); 1785 val = I915_READ(reg); 1786 val &= ~TRANS_ENABLE; 1787 I915_WRITE(reg, val); 1788 /* wait for PCH transcoder off, transcoder state */ 1789 if (intel_wait_for_register(dev_priv, 1790 reg, TRANS_STATE_ENABLE, 0, 1791 50)) 1792 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1793 1794 if (HAS_PCH_CPT(dev_priv)) { 1795 /* Workaround: Clear the timing override chicken bit again. */ 1796 reg = TRANS_CHICKEN2(pipe); 1797 val = I915_READ(reg); 1798 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1799 I915_WRITE(reg, val); 1800 } 1801 } 1802 1803 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1804 { 1805 u32 val; 1806 1807 val = I915_READ(LPT_TRANSCONF); 1808 val &= ~TRANS_ENABLE; 1809 I915_WRITE(LPT_TRANSCONF, val); 1810 /* wait for PCH transcoder off, transcoder state */ 1811 if (intel_wait_for_register(dev_priv, 1812 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 1813 50)) 1814 DRM_ERROR("Failed to disable PCH transcoder\n"); 1815 1816 /* Workaround: clear timing override bit. */ 1817 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1818 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1819 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1820 } 1821 1822 enum i915_pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc) 1823 { 1824 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1825 1826 WARN_ON(!crtc->config->has_pch_encoder); 1827 1828 if (HAS_PCH_LPT(dev_priv)) 1829 return PIPE_A; 1830 else 1831 return crtc->pipe; 1832 } 1833 1834 /** 1835 * intel_enable_pipe - enable a pipe, asserting requirements 1836 * @crtc: crtc responsible for the pipe 1837 * 1838 * Enable @crtc's pipe, making sure that various hardware specific requirements 1839 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1840 */ 1841 static void intel_enable_pipe(struct intel_crtc *crtc) 1842 { 1843 struct drm_device *dev = crtc->base.dev; 1844 struct drm_i915_private *dev_priv = to_i915(dev); 1845 enum i915_pipe pipe = crtc->pipe; 1846 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1847 i915_reg_t reg; 1848 u32 val; 1849 1850 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1851 1852 assert_planes_disabled(crtc); 1853 1854 /* 1855 * A pipe without a PLL won't actually be able to drive bits from 1856 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1857 * need the check. 1858 */ 1859 if (HAS_GMCH_DISPLAY(dev_priv)) { 1860 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) 1861 assert_dsi_pll_enabled(dev_priv); 1862 else 1863 assert_pll_enabled(dev_priv, pipe); 1864 } else { 1865 if (crtc->config->has_pch_encoder) { 1866 /* if driving the PCH, we need FDI enabled */ 1867 assert_fdi_rx_pll_enabled(dev_priv, 1868 intel_crtc_pch_transcoder(crtc)); 1869 assert_fdi_tx_pll_enabled(dev_priv, 1870 (enum i915_pipe) cpu_transcoder); 1871 } 1872 /* FIXME: assert CPU port conditions for SNB+ */ 1873 } 1874 1875 reg = PIPECONF(cpu_transcoder); 1876 val = I915_READ(reg); 1877 if (val & PIPECONF_ENABLE) { 1878 /* we keep both pipes enabled on 830 */ 1879 WARN_ON(!IS_I830(dev_priv)); 1880 return; 1881 } 1882 1883 I915_WRITE(reg, val | PIPECONF_ENABLE); 1884 POSTING_READ(reg); 1885 1886 /* 1887 * Until the pipe starts PIPEDSL reads will return a stale value, 1888 * which causes an apparent vblank timestamp jump when PIPEDSL 1889 * resets to its proper value. That also messes up the frame count 1890 * when it's derived from the timestamps. So let's wait for the 1891 * pipe to start properly before we call drm_crtc_vblank_on() 1892 */ 1893 if (dev->max_vblank_count == 0) 1894 intel_wait_for_pipe_scanline_moving(crtc); 1895 } 1896 1897 /** 1898 * intel_disable_pipe - disable a pipe, asserting requirements 1899 * @crtc: crtc whose pipes is to be disabled 1900 * 1901 * Disable the pipe of @crtc, making sure that various hardware 1902 * specific requirements are met, if applicable, e.g. plane 1903 * disabled, panel fitter off, etc. 1904 * 1905 * Will wait until the pipe has shut down before returning. 1906 */ 1907 static void intel_disable_pipe(struct intel_crtc *crtc) 1908 { 1909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1910 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1911 enum i915_pipe pipe = crtc->pipe; 1912 i915_reg_t reg; 1913 u32 val; 1914 1915 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1916 1917 /* 1918 * Make sure planes won't keep trying to pump pixels to us, 1919 * or we might hang the display. 1920 */ 1921 assert_planes_disabled(crtc); 1922 1923 reg = PIPECONF(cpu_transcoder); 1924 val = I915_READ(reg); 1925 if ((val & PIPECONF_ENABLE) == 0) 1926 return; 1927 1928 /* 1929 * Double wide has implications for planes 1930 * so best keep it disabled when not needed. 1931 */ 1932 if (crtc->config->double_wide) 1933 val &= ~PIPECONF_DOUBLE_WIDE; 1934 1935 /* Don't disable pipe or pipe PLLs if needed */ 1936 if (!IS_I830(dev_priv)) 1937 val &= ~PIPECONF_ENABLE; 1938 1939 I915_WRITE(reg, val); 1940 if ((val & PIPECONF_ENABLE) == 0) 1941 intel_wait_for_pipe_off(crtc); 1942 } 1943 1944 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1945 { 1946 return IS_GEN2(dev_priv) ? 2048 : 4096; 1947 } 1948 1949 static unsigned int 1950 intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) 1951 { 1952 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1953 unsigned int cpp = fb->format->cpp[plane]; 1954 1955 switch (fb->modifier) { 1956 case DRM_FORMAT_MOD_LINEAR: 1957 return cpp; 1958 case I915_FORMAT_MOD_X_TILED: 1959 if (IS_GEN2(dev_priv)) 1960 return 128; 1961 else 1962 return 512; 1963 case I915_FORMAT_MOD_Y_TILED_CCS: 1964 if (plane == 1) 1965 return 128; 1966 /* fall through */ 1967 case I915_FORMAT_MOD_Y_TILED: 1968 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) 1969 return 128; 1970 else 1971 return 512; 1972 case I915_FORMAT_MOD_Yf_TILED_CCS: 1973 if (plane == 1) 1974 return 128; 1975 /* fall through */ 1976 case I915_FORMAT_MOD_Yf_TILED: 1977 switch (cpp) { 1978 case 1: 1979 return 64; 1980 case 2: 1981 case 4: 1982 return 128; 1983 case 8: 1984 case 16: 1985 return 256; 1986 default: 1987 MISSING_CASE(cpp); 1988 return cpp; 1989 } 1990 break; 1991 default: 1992 MISSING_CASE(fb->modifier); 1993 return cpp; 1994 } 1995 } 1996 1997 static unsigned int 1998 intel_tile_height(const struct drm_framebuffer *fb, int plane) 1999 { 2000 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 2001 return 1; 2002 else 2003 return intel_tile_size(to_i915(fb->dev)) / 2004 intel_tile_width_bytes(fb, plane); 2005 } 2006 2007 /* Return the tile dimensions in pixel units */ 2008 static void intel_tile_dims(const struct drm_framebuffer *fb, int plane, 2009 unsigned int *tile_width, 2010 unsigned int *tile_height) 2011 { 2012 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane); 2013 unsigned int cpp = fb->format->cpp[plane]; 2014 2015 *tile_width = tile_width_bytes / cpp; 2016 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 2017 } 2018 2019 unsigned int 2020 intel_fb_align_height(const struct drm_framebuffer *fb, 2021 int plane, unsigned int height) 2022 { 2023 unsigned int tile_height = intel_tile_height(fb, plane); 2024 2025 return ALIGN(height, tile_height); 2026 } 2027 2028 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2029 { 2030 unsigned int size = 0; 2031 int i; 2032 2033 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2034 size += rot_info->plane[i].width * rot_info->plane[i].height; 2035 2036 return size; 2037 } 2038 2039 static void 2040 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2041 const struct drm_framebuffer *fb, 2042 unsigned int rotation) 2043 { 2044 view->type = I915_GGTT_VIEW_NORMAL; 2045 if (drm_rotation_90_or_270(rotation)) { 2046 view->type = I915_GGTT_VIEW_ROTATED; 2047 view->rotated = to_intel_framebuffer(fb)->rot_info; 2048 } 2049 } 2050 2051 static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv) 2052 { 2053 if (IS_I830(dev_priv)) 2054 return 16 * 1024; 2055 else if (IS_I85X(dev_priv)) 2056 return 256; 2057 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 2058 return 32; 2059 else 2060 return 4 * 1024; 2061 } 2062 2063 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2064 { 2065 if (INTEL_INFO(dev_priv)->gen >= 9) 2066 return 256 * 1024; 2067 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2068 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2069 return 128 * 1024; 2070 else if (INTEL_INFO(dev_priv)->gen >= 4) 2071 return 4 * 1024; 2072 else 2073 return 0; 2074 } 2075 2076 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2077 int plane) 2078 { 2079 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2080 2081 /* AUX_DIST needs only 4K alignment */ 2082 if (plane == 1) 2083 return 4096; 2084 2085 switch (fb->modifier) { 2086 case DRM_FORMAT_MOD_LINEAR: 2087 return intel_linear_alignment(dev_priv); 2088 case I915_FORMAT_MOD_X_TILED: 2089 if (INTEL_GEN(dev_priv) >= 9) 2090 return 256 * 1024; 2091 return 0; 2092 case I915_FORMAT_MOD_Y_TILED_CCS: 2093 case I915_FORMAT_MOD_Yf_TILED_CCS: 2094 case I915_FORMAT_MOD_Y_TILED: 2095 case I915_FORMAT_MOD_Yf_TILED: 2096 return 1 * 1024 * 1024; 2097 default: 2098 MISSING_CASE(fb->modifier); 2099 return 0; 2100 } 2101 } 2102 2103 struct i915_vma * 2104 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2105 { 2106 struct drm_device *dev = fb->dev; 2107 struct drm_i915_private *dev_priv = to_i915(dev); 2108 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2109 struct i915_ggtt_view view; 2110 struct i915_vma *vma; 2111 u32 alignment; 2112 2113 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2114 2115 alignment = intel_surf_alignment(fb, 0); 2116 2117 intel_fill_fb_ggtt_view(&view, fb, rotation); 2118 2119 /* Note that the w/a also requires 64 PTE of padding following the 2120 * bo. We currently fill all unused PTE with the shadow page and so 2121 * we should always have valid PTE following the scanout preventing 2122 * the VT-d warning. 2123 */ 2124 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2125 alignment = 256 * 1024; 2126 2127 /* 2128 * Global gtt pte registers are special registers which actually forward 2129 * writes to a chunk of system memory. Which means that there is no risk 2130 * that the register values disappear as soon as we call 2131 * intel_runtime_pm_put(), so it is correct to wrap only the 2132 * pin/unpin/fence and not more. 2133 */ 2134 intel_runtime_pm_get(dev_priv); 2135 2136 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2137 2138 vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2139 if (IS_ERR(vma)) 2140 goto err; 2141 2142 if (i915_vma_is_map_and_fenceable(vma)) { 2143 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2144 * fence, whereas 965+ only requires a fence if using 2145 * framebuffer compression. For simplicity, we always, when 2146 * possible, install a fence as the cost is not that onerous. 2147 * 2148 * If we fail to fence the tiled scanout, then either the 2149 * modeset will reject the change (which is highly unlikely as 2150 * the affected systems, all but one, do not have unmappable 2151 * space) or we will not be able to enable full powersaving 2152 * techniques (also likely not to apply due to various limits 2153 * FBC and the like impose on the size of the buffer, which 2154 * presumably we violated anyway with this unmappable buffer). 2155 * Anyway, it is presumably better to stumble onwards with 2156 * something and try to run the system in a "less than optimal" 2157 * mode that matches the user configuration. 2158 */ 2159 i915_vma_pin_fence(vma); 2160 } 2161 2162 i915_vma_get(vma); 2163 err: 2164 atomic_dec(&dev_priv->gpu_error.pending_fb_pin); 2165 2166 intel_runtime_pm_put(dev_priv); 2167 return vma; 2168 } 2169 2170 void intel_unpin_fb_vma(struct i915_vma *vma) 2171 { 2172 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2173 2174 i915_vma_unpin_fence(vma); 2175 i915_gem_object_unpin_from_display_plane(vma); 2176 i915_vma_put(vma); 2177 } 2178 2179 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2180 unsigned int rotation) 2181 { 2182 if (drm_rotation_90_or_270(rotation)) 2183 return to_intel_framebuffer(fb)->rotated[plane].pitch; 2184 else 2185 return fb->pitches[plane]; 2186 } 2187 2188 /* 2189 * Convert the x/y offsets into a linear offset. 2190 * Only valid with 0/180 degree rotation, which is fine since linear 2191 * offset is only used with linear buffers on pre-hsw and tiled buffers 2192 * with gen2/3, and 90/270 degree rotations isn't supported on any of them. 2193 */ 2194 u32 intel_fb_xy_to_linear(int x, int y, 2195 const struct intel_plane_state *state, 2196 int plane) 2197 { 2198 const struct drm_framebuffer *fb = state->base.fb; 2199 unsigned int cpp = fb->format->cpp[plane]; 2200 unsigned int pitch = fb->pitches[plane]; 2201 2202 return y * pitch + x * cpp; 2203 } 2204 2205 /* 2206 * Add the x/y offsets derived from fb->offsets[] to the user 2207 * specified plane src x/y offsets. The resulting x/y offsets 2208 * specify the start of scanout from the beginning of the gtt mapping. 2209 */ 2210 void intel_add_fb_offsets(int *x, int *y, 2211 const struct intel_plane_state *state, 2212 int plane) 2213 2214 { 2215 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); 2216 unsigned int rotation = state->base.rotation; 2217 2218 if (drm_rotation_90_or_270(rotation)) { 2219 *x += intel_fb->rotated[plane].x; 2220 *y += intel_fb->rotated[plane].y; 2221 } else { 2222 *x += intel_fb->normal[plane].x; 2223 *y += intel_fb->normal[plane].y; 2224 } 2225 } 2226 2227 static u32 __intel_adjust_tile_offset(int *x, int *y, 2228 unsigned int tile_width, 2229 unsigned int tile_height, 2230 unsigned int tile_size, 2231 unsigned int pitch_tiles, 2232 u32 old_offset, 2233 u32 new_offset) 2234 { 2235 unsigned int pitch_pixels = pitch_tiles * tile_width; 2236 unsigned int tiles; 2237 2238 WARN_ON(old_offset & (tile_size - 1)); 2239 WARN_ON(new_offset & (tile_size - 1)); 2240 WARN_ON(new_offset > old_offset); 2241 2242 tiles = (old_offset - new_offset) / tile_size; 2243 2244 *y += tiles / pitch_tiles * tile_height; 2245 *x += tiles % pitch_tiles * tile_width; 2246 2247 /* minimize x in case it got needlessly big */ 2248 *y += *x / pitch_pixels * tile_height; 2249 *x %= pitch_pixels; 2250 2251 return new_offset; 2252 } 2253 2254 static u32 _intel_adjust_tile_offset(int *x, int *y, 2255 const struct drm_framebuffer *fb, int plane, 2256 unsigned int rotation, 2257 u32 old_offset, u32 new_offset) 2258 { 2259 const struct drm_i915_private *dev_priv = to_i915(fb->dev); 2260 unsigned int cpp = fb->format->cpp[plane]; 2261 unsigned int pitch = intel_fb_pitch(fb, plane, rotation); 2262 2263 WARN_ON(new_offset > old_offset); 2264 2265 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2266 unsigned int tile_size, tile_width, tile_height; 2267 unsigned int pitch_tiles; 2268 2269 tile_size = intel_tile_size(dev_priv); 2270 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2271 2272 if (drm_rotation_90_or_270(rotation)) { 2273 pitch_tiles = pitch / tile_height; 2274 swap(tile_width, tile_height); 2275 } else { 2276 pitch_tiles = pitch / (tile_width * cpp); 2277 } 2278 2279 __intel_adjust_tile_offset(x, y, tile_width, tile_height, 2280 tile_size, pitch_tiles, 2281 old_offset, new_offset); 2282 } else { 2283 old_offset += *y * pitch + *x * cpp; 2284 2285 *y = (old_offset - new_offset) / pitch; 2286 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2287 } 2288 2289 return new_offset; 2290 } 2291 2292 /* 2293 * Adjust the tile offset by moving the difference into 2294 * the x/y offsets. 2295 */ 2296 static u32 intel_adjust_tile_offset(int *x, int *y, 2297 const struct intel_plane_state *state, int plane, 2298 u32 old_offset, u32 new_offset) 2299 { 2300 return _intel_adjust_tile_offset(x, y, state->base.fb, plane, 2301 state->base.rotation, 2302 old_offset, new_offset); 2303 } 2304 2305 /* 2306 * Computes the linear offset to the base tile and adjusts 2307 * x, y. bytes per pixel is assumed to be a power-of-two. 2308 * 2309 * In the 90/270 rotated case, x and y are assumed 2310 * to be already rotated to match the rotated GTT view, and 2311 * pitch is the tile_height aligned framebuffer height. 2312 * 2313 * This function is used when computing the derived information 2314 * under intel_framebuffer, so using any of that information 2315 * here is not allowed. Anything under drm_framebuffer can be 2316 * used. This is why the user has to pass in the pitch since it 2317 * is specified in the rotated orientation. 2318 */ 2319 static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, 2320 int *x, int *y, 2321 const struct drm_framebuffer *fb, int plane, 2322 unsigned int pitch, 2323 unsigned int rotation, 2324 u32 alignment) 2325 { 2326 uint64_t fb_modifier = fb->modifier; 2327 unsigned int cpp = fb->format->cpp[plane]; 2328 u32 offset, offset_aligned; 2329 2330 if (alignment) 2331 alignment--; 2332 2333 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2334 unsigned int tile_size, tile_width, tile_height; 2335 unsigned int tile_rows, tiles, pitch_tiles; 2336 2337 tile_size = intel_tile_size(dev_priv); 2338 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2339 2340 if (drm_rotation_90_or_270(rotation)) { 2341 pitch_tiles = pitch / tile_height; 2342 swap(tile_width, tile_height); 2343 } else { 2344 pitch_tiles = pitch / (tile_width * cpp); 2345 } 2346 2347 tile_rows = *y / tile_height; 2348 *y %= tile_height; 2349 2350 tiles = *x / tile_width; 2351 *x %= tile_width; 2352 2353 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2354 offset_aligned = offset & ~alignment; 2355 2356 __intel_adjust_tile_offset(x, y, tile_width, tile_height, 2357 tile_size, pitch_tiles, 2358 offset, offset_aligned); 2359 } else { 2360 offset = *y * pitch + *x * cpp; 2361 offset_aligned = offset & ~alignment; 2362 2363 *y = (offset & alignment) / pitch; 2364 *x = ((offset & alignment) - *y * pitch) / cpp; 2365 } 2366 2367 return offset_aligned; 2368 } 2369 2370 u32 intel_compute_tile_offset(int *x, int *y, 2371 const struct intel_plane_state *state, 2372 int plane) 2373 { 2374 struct intel_plane *intel_plane = to_intel_plane(state->base.plane); 2375 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); 2376 const struct drm_framebuffer *fb = state->base.fb; 2377 unsigned int rotation = state->base.rotation; 2378 int pitch = intel_fb_pitch(fb, plane, rotation); 2379 u32 alignment; 2380 2381 if (intel_plane->id == PLANE_CURSOR) 2382 alignment = intel_cursor_alignment(dev_priv); 2383 else 2384 alignment = intel_surf_alignment(fb, plane); 2385 2386 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, 2387 rotation, alignment); 2388 } 2389 2390 /* Convert the fb->offset[] into x/y offsets */ 2391 static int intel_fb_offset_to_xy(int *x, int *y, 2392 const struct drm_framebuffer *fb, int plane) 2393 { 2394 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2395 2396 if (fb->modifier != DRM_FORMAT_MOD_LINEAR && 2397 fb->offsets[plane] % intel_tile_size(dev_priv)) 2398 return -EINVAL; 2399 2400 *x = 0; 2401 *y = 0; 2402 2403 _intel_adjust_tile_offset(x, y, 2404 fb, plane, DRM_MODE_ROTATE_0, 2405 fb->offsets[plane], 0); 2406 2407 return 0; 2408 } 2409 2410 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) 2411 { 2412 switch (fb_modifier) { 2413 case I915_FORMAT_MOD_X_TILED: 2414 return I915_TILING_X; 2415 case I915_FORMAT_MOD_Y_TILED: 2416 case I915_FORMAT_MOD_Y_TILED_CCS: 2417 return I915_TILING_Y; 2418 default: 2419 return I915_TILING_NONE; 2420 } 2421 } 2422 2423 static const struct drm_format_info ccs_formats[] = { 2424 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2425 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2426 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2427 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, }, 2428 }; 2429 2430 static const struct drm_format_info * 2431 lookup_format_info(const struct drm_format_info formats[], 2432 int num_formats, u32 format) 2433 { 2434 int i; 2435 2436 for (i = 0; i < num_formats; i++) { 2437 if (formats[i].format == format) 2438 return &formats[i]; 2439 } 2440 2441 return NULL; 2442 } 2443 2444 static const struct drm_format_info * 2445 intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd) 2446 { 2447 switch (cmd->modifier[0]) { 2448 case I915_FORMAT_MOD_Y_TILED_CCS: 2449 case I915_FORMAT_MOD_Yf_TILED_CCS: 2450 return lookup_format_info(ccs_formats, 2451 ARRAY_SIZE(ccs_formats), 2452 cmd->pixel_format); 2453 default: 2454 return NULL; 2455 } 2456 } 2457 2458 static int 2459 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2460 struct drm_framebuffer *fb) 2461 { 2462 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2463 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2464 u32 gtt_offset_rotated = 0; 2465 unsigned int max_size = 0; 2466 int i, num_planes = fb->format->num_planes; 2467 unsigned int tile_size = intel_tile_size(dev_priv); 2468 2469 for (i = 0; i < num_planes; i++) { 2470 unsigned int width, height; 2471 unsigned int cpp, size; 2472 u32 offset; 2473 int x, y; 2474 int ret; 2475 2476 cpp = fb->format->cpp[i]; 2477 width = drm_framebuffer_plane_width(fb->width, fb, i); 2478 height = drm_framebuffer_plane_height(fb->height, fb, i); 2479 2480 ret = intel_fb_offset_to_xy(&x, &y, fb, i); 2481 if (ret) { 2482 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2483 i, fb->offsets[i]); 2484 return ret; 2485 } 2486 2487 if ((fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 2488 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) && i == 1) { 2489 int hsub = fb->format->hsub; 2490 int vsub = fb->format->vsub; 2491 int tile_width, tile_height; 2492 int main_x, main_y; 2493 int ccs_x, ccs_y; 2494 2495 intel_tile_dims(fb, i, &tile_width, &tile_height); 2496 tile_width *= hsub; 2497 tile_height *= vsub; 2498 2499 ccs_x = (x * hsub) % tile_width; 2500 ccs_y = (y * vsub) % tile_height; 2501 main_x = intel_fb->normal[0].x % tile_width; 2502 main_y = intel_fb->normal[0].y % tile_height; 2503 2504 /* 2505 * CCS doesn't have its own x/y offset register, so the intra CCS tile 2506 * x/y offsets must match between CCS and the main surface. 2507 */ 2508 if (main_x != ccs_x || main_y != ccs_y) { 2509 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n", 2510 main_x, main_y, 2511 ccs_x, ccs_y, 2512 intel_fb->normal[0].x, 2513 intel_fb->normal[0].y, 2514 x, y); 2515 return -EINVAL; 2516 } 2517 } 2518 2519 /* 2520 * The fence (if used) is aligned to the start of the object 2521 * so having the framebuffer wrap around across the edge of the 2522 * fenced region doesn't really work. We have no API to configure 2523 * the fence start offset within the object (nor could we probably 2524 * on gen2/3). So it's just easier if we just require that the 2525 * fb layout agrees with the fence layout. We already check that the 2526 * fb stride matches the fence stride elsewhere. 2527 */ 2528 if (i == 0 && i915_gem_object_is_tiled(intel_fb->obj) && 2529 (x + width) * cpp > fb->pitches[i]) { 2530 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2531 i, fb->offsets[i]); 2532 return -EINVAL; 2533 } 2534 2535 /* 2536 * First pixel of the framebuffer from 2537 * the start of the normal gtt mapping. 2538 */ 2539 intel_fb->normal[i].x = x; 2540 intel_fb->normal[i].y = y; 2541 2542 offset = _intel_compute_tile_offset(dev_priv, &x, &y, 2543 fb, i, fb->pitches[i], 2544 DRM_MODE_ROTATE_0, tile_size); 2545 offset /= tile_size; 2546 2547 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2548 unsigned int tile_width, tile_height; 2549 unsigned int pitch_tiles; 2550 struct drm_rect r; 2551 2552 intel_tile_dims(fb, i, &tile_width, &tile_height); 2553 2554 rot_info->plane[i].offset = offset; 2555 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2556 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2557 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2558 2559 intel_fb->rotated[i].pitch = 2560 rot_info->plane[i].height * tile_height; 2561 2562 /* how many tiles does this plane need */ 2563 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2564 /* 2565 * If the plane isn't horizontally tile aligned, 2566 * we need one more tile. 2567 */ 2568 if (x != 0) 2569 size++; 2570 2571 /* rotate the x/y offsets to match the GTT view */ 2572 r.x1 = x; 2573 r.y1 = y; 2574 r.x2 = x + width; 2575 r.y2 = y + height; 2576 drm_rect_rotate(&r, 2577 rot_info->plane[i].width * tile_width, 2578 rot_info->plane[i].height * tile_height, 2579 DRM_MODE_ROTATE_270); 2580 x = r.x1; 2581 y = r.y1; 2582 2583 /* rotate the tile dimensions to match the GTT view */ 2584 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2585 swap(tile_width, tile_height); 2586 2587 /* 2588 * We only keep the x/y offsets, so push all of the 2589 * gtt offset into the x/y offsets. 2590 */ 2591 __intel_adjust_tile_offset(&x, &y, 2592 tile_width, tile_height, 2593 tile_size, pitch_tiles, 2594 gtt_offset_rotated * tile_size, 0); 2595 2596 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2597 2598 /* 2599 * First pixel of the framebuffer from 2600 * the start of the rotated gtt mapping. 2601 */ 2602 intel_fb->rotated[i].x = x; 2603 intel_fb->rotated[i].y = y; 2604 } else { 2605 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2606 x * cpp, tile_size); 2607 } 2608 2609 /* how many tiles in total needed in the bo */ 2610 max_size = max(max_size, offset + size); 2611 } 2612 2613 if (max_size * tile_size > intel_fb->obj->base.size) { 2614 DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n", 2615 max_size * tile_size, intel_fb->obj->base.size); 2616 return -EINVAL; 2617 } 2618 2619 return 0; 2620 } 2621 2622 static int i9xx_format_to_fourcc(int format) 2623 { 2624 switch (format) { 2625 case DISPPLANE_8BPP: 2626 return DRM_FORMAT_C8; 2627 case DISPPLANE_BGRX555: 2628 return DRM_FORMAT_XRGB1555; 2629 case DISPPLANE_BGRX565: 2630 return DRM_FORMAT_RGB565; 2631 default: 2632 case DISPPLANE_BGRX888: 2633 return DRM_FORMAT_XRGB8888; 2634 case DISPPLANE_RGBX888: 2635 return DRM_FORMAT_XBGR8888; 2636 case DISPPLANE_BGRX101010: 2637 return DRM_FORMAT_XRGB2101010; 2638 case DISPPLANE_RGBX101010: 2639 return DRM_FORMAT_XBGR2101010; 2640 } 2641 } 2642 2643 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2644 { 2645 switch (format) { 2646 case PLANE_CTL_FORMAT_RGB_565: 2647 return DRM_FORMAT_RGB565; 2648 default: 2649 case PLANE_CTL_FORMAT_XRGB_8888: 2650 if (rgb_order) { 2651 if (alpha) 2652 return DRM_FORMAT_ABGR8888; 2653 else 2654 return DRM_FORMAT_XBGR8888; 2655 } else { 2656 if (alpha) 2657 return DRM_FORMAT_ARGB8888; 2658 else 2659 return DRM_FORMAT_XRGB8888; 2660 } 2661 case PLANE_CTL_FORMAT_XRGB_2101010: 2662 if (rgb_order) 2663 return DRM_FORMAT_XBGR2101010; 2664 else 2665 return DRM_FORMAT_XRGB2101010; 2666 } 2667 } 2668 2669 static bool 2670 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 2671 struct intel_initial_plane_config *plane_config) 2672 { 2673 struct drm_device *dev = crtc->base.dev; 2674 struct drm_i915_private *dev_priv = to_i915(dev); 2675 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2676 struct drm_i915_gem_object *obj = NULL; 2677 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2678 struct drm_framebuffer *fb = &plane_config->fb->base; 2679 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2680 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2681 PAGE_SIZE); 2682 2683 size_aligned -= base_aligned; 2684 2685 if (plane_config->size == 0) 2686 return false; 2687 2688 /* If the FB is too big, just don't use it since fbdev is not very 2689 * important and we should probably use that space with FBC or other 2690 * features. */ 2691 if (size_aligned * 2 > ggtt->stolen_usable_size) 2692 return false; 2693 2694 mutex_lock(&dev->struct_mutex); 2695 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 2696 base_aligned, 2697 base_aligned, 2698 size_aligned); 2699 mutex_unlock(&dev->struct_mutex); 2700 if (!obj) 2701 return false; 2702 2703 if (plane_config->tiling == I915_TILING_X) 2704 obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; 2705 2706 mode_cmd.pixel_format = fb->format->format; 2707 mode_cmd.width = fb->width; 2708 mode_cmd.height = fb->height; 2709 mode_cmd.pitches[0] = fb->pitches[0]; 2710 mode_cmd.modifier[0] = fb->modifier; 2711 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2712 2713 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 2714 DRM_DEBUG_KMS("intel fb init failed\n"); 2715 goto out_unref_obj; 2716 } 2717 2718 2719 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2720 return true; 2721 2722 out_unref_obj: 2723 i915_gem_object_put(obj); 2724 return false; 2725 } 2726 2727 static void 2728 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 2729 struct intel_plane_state *plane_state, 2730 bool visible) 2731 { 2732 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2733 2734 plane_state->base.visible = visible; 2735 2736 /* FIXME pre-g4x don't work like this */ 2737 if (visible) { 2738 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base)); 2739 crtc_state->active_planes |= BIT(plane->id); 2740 } else { 2741 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base)); 2742 crtc_state->active_planes &= ~BIT(plane->id); 2743 } 2744 2745 DRM_DEBUG_KMS("%s active planes 0x%x\n", 2746 crtc_state->base.crtc->name, 2747 crtc_state->active_planes); 2748 } 2749 2750 static void intel_plane_disable_noatomic(struct intel_crtc *crtc, 2751 struct intel_plane *plane) 2752 { 2753 struct intel_crtc_state *crtc_state = 2754 to_intel_crtc_state(crtc->base.state); 2755 struct intel_plane_state *plane_state = 2756 to_intel_plane_state(plane->base.state); 2757 2758 intel_set_plane_visible(crtc_state, plane_state, false); 2759 2760 if (plane->id == PLANE_PRIMARY) 2761 intel_pre_disable_primary_noatomic(&crtc->base); 2762 2763 trace_intel_disable_plane(&plane->base, crtc); 2764 plane->disable_plane(plane, crtc); 2765 } 2766 2767 static void 2768 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2769 struct intel_initial_plane_config *plane_config) 2770 { 2771 struct drm_device *dev = intel_crtc->base.dev; 2772 struct drm_i915_private *dev_priv = to_i915(dev); 2773 struct drm_crtc *c; 2774 struct drm_i915_gem_object *obj; 2775 struct drm_plane *primary = intel_crtc->base.primary; 2776 struct drm_plane_state *plane_state = primary->state; 2777 struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2778 struct intel_plane *intel_plane = to_intel_plane(primary); 2779 struct intel_plane_state *intel_state = 2780 to_intel_plane_state(plane_state); 2781 struct drm_framebuffer *fb; 2782 2783 if (!plane_config->fb) 2784 return; 2785 2786 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2787 fb = &plane_config->fb->base; 2788 goto valid_fb; 2789 } 2790 2791 kfree(plane_config->fb); 2792 2793 /* 2794 * Failed to alloc the obj, check to see if we should share 2795 * an fb with another CRTC instead 2796 */ 2797 for_each_crtc(dev, c) { 2798 struct intel_plane_state *state; 2799 2800 if (c == &intel_crtc->base) 2801 continue; 2802 2803 if (!to_intel_crtc(c)->active) 2804 continue; 2805 2806 state = to_intel_plane_state(c->primary->state); 2807 if (!state->vma) 2808 continue; 2809 2810 if (intel_plane_ggtt_offset(state) == plane_config->base) { 2811 fb = c->primary->fb; 2812 drm_framebuffer_get(fb); 2813 goto valid_fb; 2814 } 2815 } 2816 2817 /* 2818 * We've failed to reconstruct the BIOS FB. Current display state 2819 * indicates that the primary plane is visible, but has a NULL FB, 2820 * which will lead to problems later if we don't fix it up. The 2821 * simplest solution is to just disable the primary plane now and 2822 * pretend the BIOS never had it enabled. 2823 */ 2824 intel_plane_disable_noatomic(intel_crtc, intel_plane); 2825 2826 return; 2827 2828 valid_fb: 2829 mutex_lock(&dev->struct_mutex); 2830 intel_state->vma = 2831 intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2832 mutex_unlock(&dev->struct_mutex); 2833 if (IS_ERR(intel_state->vma)) { 2834 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 2835 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 2836 2837 intel_state->vma = NULL; 2838 drm_framebuffer_put(fb); 2839 return; 2840 } 2841 2842 plane_state->src_x = 0; 2843 plane_state->src_y = 0; 2844 plane_state->src_w = fb->width << 16; 2845 plane_state->src_h = fb->height << 16; 2846 2847 plane_state->crtc_x = 0; 2848 plane_state->crtc_y = 0; 2849 plane_state->crtc_w = fb->width; 2850 plane_state->crtc_h = fb->height; 2851 2852 intel_state->base.src = drm_plane_state_src(plane_state); 2853 intel_state->base.dst = drm_plane_state_dest(plane_state); 2854 2855 obj = intel_fb_obj(fb); 2856 if (i915_gem_object_is_tiled(obj)) 2857 dev_priv->preserve_bios_swizzle = true; 2858 2859 drm_framebuffer_get(fb); 2860 primary->fb = primary->state->fb = fb; 2861 primary->crtc = primary->state->crtc = &intel_crtc->base; 2862 2863 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2864 to_intel_plane_state(plane_state), 2865 true); 2866 2867 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 2868 &obj->frontbuffer_bits); 2869 } 2870 2871 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, 2872 unsigned int rotation) 2873 { 2874 int cpp = fb->format->cpp[plane]; 2875 2876 switch (fb->modifier) { 2877 case DRM_FORMAT_MOD_LINEAR: 2878 case I915_FORMAT_MOD_X_TILED: 2879 switch (cpp) { 2880 case 8: 2881 return 4096; 2882 case 4: 2883 case 2: 2884 case 1: 2885 return 8192; 2886 default: 2887 MISSING_CASE(cpp); 2888 break; 2889 } 2890 break; 2891 case I915_FORMAT_MOD_Y_TILED_CCS: 2892 case I915_FORMAT_MOD_Yf_TILED_CCS: 2893 /* FIXME AUX plane? */ 2894 case I915_FORMAT_MOD_Y_TILED: 2895 case I915_FORMAT_MOD_Yf_TILED: 2896 switch (cpp) { 2897 case 8: 2898 return 2048; 2899 case 4: 2900 return 4096; 2901 case 2: 2902 case 1: 2903 return 8192; 2904 default: 2905 MISSING_CASE(cpp); 2906 break; 2907 } 2908 break; 2909 default: 2910 MISSING_CASE(fb->modifier); 2911 } 2912 2913 return 2048; 2914 } 2915 2916 static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, 2917 int main_x, int main_y, u32 main_offset) 2918 { 2919 const struct drm_framebuffer *fb = plane_state->base.fb; 2920 int hsub = fb->format->hsub; 2921 int vsub = fb->format->vsub; 2922 int aux_x = plane_state->aux.x; 2923 int aux_y = plane_state->aux.y; 2924 u32 aux_offset = plane_state->aux.offset; 2925 u32 alignment = intel_surf_alignment(fb, 1); 2926 2927 while (aux_offset >= main_offset && aux_y <= main_y) { 2928 int x, y; 2929 2930 if (aux_x == main_x && aux_y == main_y) 2931 break; 2932 2933 if (aux_offset == 0) 2934 break; 2935 2936 x = aux_x / hsub; 2937 y = aux_y / vsub; 2938 aux_offset = intel_adjust_tile_offset(&x, &y, plane_state, 1, 2939 aux_offset, aux_offset - alignment); 2940 aux_x = x * hsub + aux_x % hsub; 2941 aux_y = y * vsub + aux_y % vsub; 2942 } 2943 2944 if (aux_x != main_x || aux_y != main_y) 2945 return false; 2946 2947 plane_state->aux.offset = aux_offset; 2948 plane_state->aux.x = aux_x; 2949 plane_state->aux.y = aux_y; 2950 2951 return true; 2952 } 2953 2954 static int skl_check_main_surface(struct intel_plane_state *plane_state) 2955 { 2956 const struct drm_framebuffer *fb = plane_state->base.fb; 2957 unsigned int rotation = plane_state->base.rotation; 2958 int x = plane_state->base.src.x1 >> 16; 2959 int y = plane_state->base.src.y1 >> 16; 2960 int w = drm_rect_width(&plane_state->base.src) >> 16; 2961 int h = drm_rect_height(&plane_state->base.src) >> 16; 2962 int max_width = skl_max_plane_width(fb, 0, rotation); 2963 int max_height = 4096; 2964 u32 alignment, offset, aux_offset = plane_state->aux.offset; 2965 2966 if (w > max_width || h > max_height) { 2967 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", 2968 w, h, max_width, max_height); 2969 return -EINVAL; 2970 } 2971 2972 intel_add_fb_offsets(&x, &y, plane_state, 0); 2973 offset = intel_compute_tile_offset(&x, &y, plane_state, 0); 2974 alignment = intel_surf_alignment(fb, 0); 2975 2976 /* 2977 * AUX surface offset is specified as the distance from the 2978 * main surface offset, and it must be non-negative. Make 2979 * sure that is what we will get. 2980 */ 2981 if (offset > aux_offset) 2982 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2983 offset, aux_offset & ~(alignment - 1)); 2984 2985 /* 2986 * When using an X-tiled surface, the plane blows up 2987 * if the x offset + width exceed the stride. 2988 * 2989 * TODO: linear and Y-tiled seem fine, Yf untested, 2990 */ 2991 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 2992 int cpp = fb->format->cpp[0]; 2993 2994 while ((x + w) * cpp > fb->pitches[0]) { 2995 if (offset == 0) { 2996 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n"); 2997 return -EINVAL; 2998 } 2999 3000 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 3001 offset, offset - alignment); 3002 } 3003 } 3004 3005 /* 3006 * CCS AUX surface doesn't have its own x/y offsets, we must make sure 3007 * they match with the main surface x/y offsets. 3008 */ 3009 if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 3010 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { 3011 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) { 3012 if (offset == 0) 3013 break; 3014 3015 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 3016 offset, offset - alignment); 3017 } 3018 3019 if (x != plane_state->aux.x || y != plane_state->aux.y) { 3020 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n"); 3021 return -EINVAL; 3022 } 3023 } 3024 3025 plane_state->main.offset = offset; 3026 plane_state->main.x = x; 3027 plane_state->main.y = y; 3028 3029 return 0; 3030 } 3031 3032 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 3033 { 3034 const struct drm_framebuffer *fb = plane_state->base.fb; 3035 unsigned int rotation = plane_state->base.rotation; 3036 int max_width = skl_max_plane_width(fb, 1, rotation); 3037 int max_height = 4096; 3038 int x = plane_state->base.src.x1 >> 17; 3039 int y = plane_state->base.src.y1 >> 17; 3040 int w = drm_rect_width(&plane_state->base.src) >> 17; 3041 int h = drm_rect_height(&plane_state->base.src) >> 17; 3042 u32 offset; 3043 3044 intel_add_fb_offsets(&x, &y, plane_state, 1); 3045 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 3046 3047 /* FIXME not quite sure how/if these apply to the chroma plane */ 3048 if (w > max_width || h > max_height) { 3049 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 3050 w, h, max_width, max_height); 3051 return -EINVAL; 3052 } 3053 3054 plane_state->aux.offset = offset; 3055 plane_state->aux.x = x; 3056 plane_state->aux.y = y; 3057 3058 return 0; 3059 } 3060 3061 static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state) 3062 { 3063 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 3064 struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc); 3065 const struct drm_framebuffer *fb = plane_state->base.fb; 3066 int src_x = plane_state->base.src.x1 >> 16; 3067 int src_y = plane_state->base.src.y1 >> 16; 3068 int hsub = fb->format->hsub; 3069 int vsub = fb->format->vsub; 3070 int x = src_x / hsub; 3071 int y = src_y / vsub; 3072 u32 offset; 3073 3074 switch (plane->id) { 3075 case PLANE_PRIMARY: 3076 case PLANE_SPRITE0: 3077 break; 3078 default: 3079 DRM_DEBUG_KMS("RC support only on plane 1 and 2\n"); 3080 return -EINVAL; 3081 } 3082 3083 if (crtc->pipe == PIPE_C) { 3084 DRM_DEBUG_KMS("No RC support on pipe C\n"); 3085 return -EINVAL; 3086 } 3087 3088 if (plane_state->base.rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180)) { 3089 DRM_DEBUG_KMS("RC support only with 0/180 degree rotation %x\n", 3090 plane_state->base.rotation); 3091 return -EINVAL; 3092 } 3093 3094 intel_add_fb_offsets(&x, &y, plane_state, 1); 3095 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 3096 3097 plane_state->aux.offset = offset; 3098 plane_state->aux.x = x * hsub + src_x % hsub; 3099 plane_state->aux.y = y * vsub + src_y % vsub; 3100 3101 return 0; 3102 } 3103 3104 int skl_check_plane_surface(struct intel_plane_state *plane_state) 3105 { 3106 const struct drm_framebuffer *fb = plane_state->base.fb; 3107 unsigned int rotation = plane_state->base.rotation; 3108 int ret; 3109 3110 if (!plane_state->base.visible) 3111 return 0; 3112 3113 /* Rotate src coordinates to match rotated GTT view */ 3114 if (drm_rotation_90_or_270(rotation)) 3115 drm_rect_rotate(&plane_state->base.src, 3116 fb->width << 16, fb->height << 16, 3117 DRM_MODE_ROTATE_270); 3118 3119 /* 3120 * Handle the AUX surface first since 3121 * the main surface setup depends on it. 3122 */ 3123 if (fb->format->format == DRM_FORMAT_NV12) { 3124 ret = skl_check_nv12_aux_surface(plane_state); 3125 if (ret) 3126 return ret; 3127 } else if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 3128 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS) { 3129 ret = skl_check_ccs_aux_surface(plane_state); 3130 if (ret) 3131 return ret; 3132 } else { 3133 plane_state->aux.offset = ~0xfff; 3134 plane_state->aux.x = 0; 3135 plane_state->aux.y = 0; 3136 } 3137 3138 ret = skl_check_main_surface(plane_state); 3139 if (ret) 3140 return ret; 3141 3142 return 0; 3143 } 3144 3145 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 3146 const struct intel_plane_state *plane_state) 3147 { 3148 struct drm_i915_private *dev_priv = 3149 to_i915(plane_state->base.plane->dev); 3150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 3151 const struct drm_framebuffer *fb = plane_state->base.fb; 3152 unsigned int rotation = plane_state->base.rotation; 3153 u32 dspcntr; 3154 3155 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; 3156 3157 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || 3158 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 3159 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 3160 3161 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3162 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3163 3164 if (INTEL_GEN(dev_priv) < 4) 3165 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3166 3167 switch (fb->format->format) { 3168 case DRM_FORMAT_C8: 3169 dspcntr |= DISPPLANE_8BPP; 3170 break; 3171 case DRM_FORMAT_XRGB1555: 3172 dspcntr |= DISPPLANE_BGRX555; 3173 break; 3174 case DRM_FORMAT_RGB565: 3175 dspcntr |= DISPPLANE_BGRX565; 3176 break; 3177 case DRM_FORMAT_XRGB8888: 3178 dspcntr |= DISPPLANE_BGRX888; 3179 break; 3180 case DRM_FORMAT_XBGR8888: 3181 dspcntr |= DISPPLANE_RGBX888; 3182 break; 3183 case DRM_FORMAT_XRGB2101010: 3184 dspcntr |= DISPPLANE_BGRX101010; 3185 break; 3186 case DRM_FORMAT_XBGR2101010: 3187 dspcntr |= DISPPLANE_RGBX101010; 3188 break; 3189 default: 3190 MISSING_CASE(fb->format->format); 3191 return 0; 3192 } 3193 3194 if (INTEL_GEN(dev_priv) >= 4 && 3195 fb->modifier == I915_FORMAT_MOD_X_TILED) 3196 dspcntr |= DISPPLANE_TILED; 3197 3198 if (rotation & DRM_MODE_ROTATE_180) 3199 dspcntr |= DISPPLANE_ROTATE_180; 3200 3201 if (rotation & DRM_MODE_REFLECT_X) 3202 dspcntr |= DISPPLANE_MIRROR; 3203 3204 return dspcntr; 3205 } 3206 3207 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3208 { 3209 struct drm_i915_private *dev_priv = 3210 to_i915(plane_state->base.plane->dev); 3211 int src_x = plane_state->base.src.x1 >> 16; 3212 int src_y = plane_state->base.src.y1 >> 16; 3213 u32 offset; 3214 3215 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3216 3217 if (INTEL_GEN(dev_priv) >= 4) 3218 offset = intel_compute_tile_offset(&src_x, &src_y, 3219 plane_state, 0); 3220 else 3221 offset = 0; 3222 3223 /* HSW/BDW do this automagically in hardware */ 3224 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3225 unsigned int rotation = plane_state->base.rotation; 3226 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3227 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3228 3229 if (rotation & DRM_MODE_ROTATE_180) { 3230 src_x += src_w - 1; 3231 src_y += src_h - 1; 3232 } else if (rotation & DRM_MODE_REFLECT_X) { 3233 src_x += src_w - 1; 3234 } 3235 } 3236 3237 plane_state->main.offset = offset; 3238 plane_state->main.x = src_x; 3239 plane_state->main.y = src_y; 3240 3241 return 0; 3242 } 3243 3244 static void i9xx_update_primary_plane(struct intel_plane *primary, 3245 const struct intel_crtc_state *crtc_state, 3246 const struct intel_plane_state *plane_state) 3247 { 3248 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 3249 const struct drm_framebuffer *fb = plane_state->base.fb; 3250 enum plane plane = primary->plane; 3251 u32 linear_offset; 3252 u32 dspcntr = plane_state->ctl; 3253 i915_reg_t reg = DSPCNTR(plane); 3254 int x = plane_state->main.x; 3255 int y = plane_state->main.y; 3256 unsigned long irqflags; 3257 u32 dspaddr_offset; 3258 3259 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3260 3261 if (INTEL_GEN(dev_priv) >= 4) 3262 dspaddr_offset = plane_state->main.offset; 3263 else 3264 dspaddr_offset = linear_offset; 3265 3266 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3267 3268 if (INTEL_GEN(dev_priv) < 4) { 3269 /* pipesrc and dspsize control the size that is scaled from, 3270 * which should always be the user's requested size. 3271 */ 3272 I915_WRITE_FW(DSPSIZE(plane), 3273 ((crtc_state->pipe_src_h - 1) << 16) | 3274 (crtc_state->pipe_src_w - 1)); 3275 I915_WRITE_FW(DSPPOS(plane), 0); 3276 } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) { 3277 I915_WRITE_FW(PRIMSIZE(plane), 3278 ((crtc_state->pipe_src_h - 1) << 16) | 3279 (crtc_state->pipe_src_w - 1)); 3280 I915_WRITE_FW(PRIMPOS(plane), 0); 3281 I915_WRITE_FW(PRIMCNSTALPHA(plane), 0); 3282 } 3283 3284 I915_WRITE_FW(reg, dspcntr); 3285 3286 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); 3287 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3288 I915_WRITE_FW(DSPSURF(plane), 3289 intel_plane_ggtt_offset(plane_state) + 3290 dspaddr_offset); 3291 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); 3292 } else if (INTEL_GEN(dev_priv) >= 4) { 3293 I915_WRITE_FW(DSPSURF(plane), 3294 intel_plane_ggtt_offset(plane_state) + 3295 dspaddr_offset); 3296 I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); 3297 I915_WRITE_FW(DSPLINOFF(plane), linear_offset); 3298 } else { 3299 I915_WRITE_FW(DSPADDR(plane), 3300 intel_plane_ggtt_offset(plane_state) + 3301 dspaddr_offset); 3302 } 3303 POSTING_READ_FW(reg); 3304 3305 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3306 } 3307 3308 static void i9xx_disable_primary_plane(struct intel_plane *primary, 3309 struct intel_crtc *crtc) 3310 { 3311 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 3312 enum plane plane = primary->plane; 3313 unsigned long irqflags; 3314 3315 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3316 3317 I915_WRITE_FW(DSPCNTR(plane), 0); 3318 if (INTEL_INFO(dev_priv)->gen >= 4) 3319 I915_WRITE_FW(DSPSURF(plane), 0); 3320 else 3321 I915_WRITE_FW(DSPADDR(plane), 0); 3322 POSTING_READ_FW(DSPCNTR(plane)); 3323 3324 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3325 } 3326 3327 static bool i9xx_plane_get_hw_state(struct intel_plane *primary) 3328 { 3329 3330 struct drm_i915_private *dev_priv = to_i915(primary->base.dev); 3331 enum intel_display_power_domain power_domain; 3332 enum plane plane = primary->plane; 3333 enum i915_pipe pipe = primary->pipe; 3334 bool ret; 3335 3336 /* 3337 * Not 100% correct for planes that can move between pipes, 3338 * but that's only the case for gen2-4 which don't have any 3339 * display power wells. 3340 */ 3341 power_domain = POWER_DOMAIN_PIPE(pipe); 3342 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 3343 return false; 3344 3345 ret = I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE; 3346 3347 intel_display_power_put(dev_priv, power_domain); 3348 3349 return ret; 3350 } 3351 3352 static u32 3353 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3354 { 3355 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3356 return 64; 3357 else 3358 return intel_tile_width_bytes(fb, plane); 3359 } 3360 3361 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3362 { 3363 struct drm_device *dev = intel_crtc->base.dev; 3364 struct drm_i915_private *dev_priv = to_i915(dev); 3365 3366 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3367 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3368 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3369 } 3370 3371 /* 3372 * This function detaches (aka. unbinds) unused scalers in hardware 3373 */ 3374 static void skl_detach_scalers(struct intel_crtc *intel_crtc) 3375 { 3376 struct intel_crtc_scaler_state *scaler_state; 3377 int i; 3378 3379 scaler_state = &intel_crtc->config->scaler_state; 3380 3381 /* loop through and disable scalers that aren't in use */ 3382 for (i = 0; i < intel_crtc->num_scalers; i++) { 3383 if (!scaler_state->scalers[i].in_use) 3384 skl_detach_scaler(intel_crtc, i); 3385 } 3386 } 3387 3388 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 3389 unsigned int rotation) 3390 { 3391 u32 stride; 3392 3393 if (plane >= fb->format->num_planes) 3394 return 0; 3395 3396 stride = intel_fb_pitch(fb, plane, rotation); 3397 3398 /* 3399 * The stride is either expressed as a multiple of 64 bytes chunks for 3400 * linear buffers or in number of tiles for tiled buffers. 3401 */ 3402 if (drm_rotation_90_or_270(rotation)) 3403 stride /= intel_tile_height(fb, plane); 3404 else 3405 stride /= intel_fb_stride_alignment(fb, plane); 3406 3407 return stride; 3408 } 3409 3410 static u32 skl_plane_ctl_format(uint32_t pixel_format) 3411 { 3412 switch (pixel_format) { 3413 case DRM_FORMAT_C8: 3414 return PLANE_CTL_FORMAT_INDEXED; 3415 case DRM_FORMAT_RGB565: 3416 return PLANE_CTL_FORMAT_RGB_565; 3417 case DRM_FORMAT_XBGR8888: 3418 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3419 case DRM_FORMAT_XRGB8888: 3420 return PLANE_CTL_FORMAT_XRGB_8888; 3421 /* 3422 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers 3423 * to be already pre-multiplied. We need to add a knob (or a different 3424 * DRM_FORMAT) for user-space to configure that. 3425 */ 3426 case DRM_FORMAT_ABGR8888: 3427 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX | 3428 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3429 case DRM_FORMAT_ARGB8888: 3430 return PLANE_CTL_FORMAT_XRGB_8888 | 3431 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3432 case DRM_FORMAT_XRGB2101010: 3433 return PLANE_CTL_FORMAT_XRGB_2101010; 3434 case DRM_FORMAT_XBGR2101010: 3435 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 3436 case DRM_FORMAT_YUYV: 3437 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3438 case DRM_FORMAT_YVYU: 3439 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3440 case DRM_FORMAT_UYVY: 3441 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 3442 case DRM_FORMAT_VYUY: 3443 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3444 default: 3445 MISSING_CASE(pixel_format); 3446 } 3447 3448 return 0; 3449 } 3450 3451 static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3452 { 3453 switch (fb_modifier) { 3454 case DRM_FORMAT_MOD_LINEAR: 3455 break; 3456 case I915_FORMAT_MOD_X_TILED: 3457 return PLANE_CTL_TILED_X; 3458 case I915_FORMAT_MOD_Y_TILED: 3459 return PLANE_CTL_TILED_Y; 3460 case I915_FORMAT_MOD_Y_TILED_CCS: 3461 return PLANE_CTL_TILED_Y | PLANE_CTL_DECOMPRESSION_ENABLE; 3462 case I915_FORMAT_MOD_Yf_TILED: 3463 return PLANE_CTL_TILED_YF; 3464 case I915_FORMAT_MOD_Yf_TILED_CCS: 3465 return PLANE_CTL_TILED_YF | PLANE_CTL_DECOMPRESSION_ENABLE; 3466 default: 3467 MISSING_CASE(fb_modifier); 3468 } 3469 3470 return 0; 3471 } 3472 3473 static u32 skl_plane_ctl_rotation(unsigned int rotation) 3474 { 3475 switch (rotation) { 3476 case DRM_MODE_ROTATE_0: 3477 break; 3478 /* 3479 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr 3480 * while i915 HW rotation is clockwise, thats why this swapping. 3481 */ 3482 case DRM_MODE_ROTATE_90: 3483 return PLANE_CTL_ROTATE_270; 3484 case DRM_MODE_ROTATE_180: 3485 return PLANE_CTL_ROTATE_180; 3486 case DRM_MODE_ROTATE_270: 3487 return PLANE_CTL_ROTATE_90; 3488 default: 3489 MISSING_CASE(rotation); 3490 } 3491 3492 return 0; 3493 } 3494 3495 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 3496 const struct intel_plane_state *plane_state) 3497 { 3498 struct drm_i915_private *dev_priv = 3499 to_i915(plane_state->base.plane->dev); 3500 const struct drm_framebuffer *fb = plane_state->base.fb; 3501 unsigned int rotation = plane_state->base.rotation; 3502 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 3503 u32 plane_ctl; 3504 3505 plane_ctl = PLANE_CTL_ENABLE; 3506 3507 if (!IS_GEMINILAKE(dev_priv) && !IS_CANNONLAKE(dev_priv)) { 3508 plane_ctl |= 3509 PLANE_CTL_PIPE_GAMMA_ENABLE | 3510 PLANE_CTL_PIPE_CSC_ENABLE | 3511 PLANE_CTL_PLANE_GAMMA_DISABLE; 3512 } 3513 3514 plane_ctl |= skl_plane_ctl_format(fb->format->format); 3515 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 3516 plane_ctl |= skl_plane_ctl_rotation(rotation); 3517 3518 if (key->flags & I915_SET_COLORKEY_DESTINATION) 3519 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 3520 else if (key->flags & I915_SET_COLORKEY_SOURCE) 3521 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 3522 3523 return plane_ctl; 3524 } 3525 3526 static int 3527 __intel_display_resume(struct drm_device *dev, 3528 struct drm_atomic_state *state, 3529 struct drm_modeset_acquire_ctx *ctx) 3530 { 3531 struct drm_crtc_state *crtc_state; 3532 struct drm_crtc *crtc; 3533 int i, ret; 3534 3535 intel_modeset_setup_hw_state(dev, ctx); 3536 i915_redisable_vga(to_i915(dev)); 3537 3538 if (!state) 3539 return 0; 3540 3541 /* 3542 * We've duplicated the state, pointers to the old state are invalid. 3543 * 3544 * Don't attempt to use the old state until we commit the duplicated state. 3545 */ 3546 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 3547 /* 3548 * Force recalculation even if we restore 3549 * current state. With fast modeset this may not result 3550 * in a modeset when the state is compatible. 3551 */ 3552 crtc_state->mode_changed = true; 3553 } 3554 3555 /* ignore any reset values/BIOS leftovers in the WM registers */ 3556 if (!HAS_GMCH_DISPLAY(to_i915(dev))) 3557 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3558 3559 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 3560 3561 WARN_ON(ret == -EDEADLK); 3562 return ret; 3563 } 3564 3565 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3566 { 3567 return intel_has_gpu_reset(dev_priv) && 3568 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3569 } 3570 3571 void intel_prepare_reset(struct drm_i915_private *dev_priv) 3572 { 3573 struct drm_device *dev = &dev_priv->drm; 3574 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3575 struct drm_atomic_state *state; 3576 int ret; 3577 3578 3579 /* reset doesn't touch the display */ 3580 if (!i915_modparams.force_reset_modeset_test && 3581 !gpu_reset_clobbers_display(dev_priv)) 3582 return; 3583 3584 /* We have a modeset vs reset deadlock, defensively unbreak it. */ 3585 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); 3586 wake_up_all(&dev_priv->gpu_error.wait_queue); 3587 3588 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) { 3589 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n"); 3590 i915_gem_set_wedged(dev_priv); 3591 } 3592 3593 /* 3594 * Need mode_config.mutex so that we don't 3595 * trample ongoing ->detect() and whatnot. 3596 */ 3597 mutex_lock(&dev->mode_config.mutex); 3598 drm_modeset_acquire_init(ctx, 0); 3599 while (1) { 3600 ret = drm_modeset_lock_all_ctx(dev, ctx); 3601 if (ret != -EDEADLK) 3602 break; 3603 3604 drm_modeset_backoff(ctx); 3605 } 3606 /* 3607 * Disabling the crtcs gracefully seems nicer. Also the 3608 * g33 docs say we should at least disable all the planes. 3609 */ 3610 state = drm_atomic_helper_duplicate_state(dev, ctx); 3611 if (IS_ERR(state)) { 3612 ret = PTR_ERR(state); 3613 DRM_ERROR("Duplicating state failed with %i\n", ret); 3614 return; 3615 } 3616 3617 ret = drm_atomic_helper_disable_all(dev, ctx); 3618 if (ret) { 3619 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 3620 drm_atomic_state_put(state); 3621 return; 3622 } 3623 3624 dev_priv->modeset_restore_state = state; 3625 state->acquire_ctx = ctx; 3626 } 3627 3628 void intel_finish_reset(struct drm_i915_private *dev_priv) 3629 { 3630 struct drm_device *dev = &dev_priv->drm; 3631 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3632 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3633 int ret; 3634 3635 /* reset doesn't touch the display */ 3636 if (!i915_modparams.force_reset_modeset_test && 3637 !gpu_reset_clobbers_display(dev_priv)) 3638 return; 3639 3640 if (!state) 3641 goto unlock; 3642 3643 dev_priv->modeset_restore_state = NULL; 3644 3645 /* reset doesn't touch the display */ 3646 if (!gpu_reset_clobbers_display(dev_priv)) { 3647 /* for testing only restore the display */ 3648 ret = __intel_display_resume(dev, state, ctx); 3649 if (ret) 3650 DRM_ERROR("Restoring old state failed with %i\n", ret); 3651 } else { 3652 /* 3653 * The display has been reset as well, 3654 * so need a full re-initialization. 3655 */ 3656 intel_runtime_pm_disable_interrupts(dev_priv); 3657 intel_runtime_pm_enable_interrupts(dev_priv); 3658 3659 intel_pps_unlock_regs_wa(dev_priv); 3660 intel_modeset_init_hw(dev); 3661 intel_init_clock_gating(dev_priv); 3662 3663 spin_lock_irq(&dev_priv->irq_lock); 3664 if (dev_priv->display.hpd_irq_setup) 3665 dev_priv->display.hpd_irq_setup(dev_priv); 3666 spin_unlock_irq(&dev_priv->irq_lock); 3667 3668 ret = __intel_display_resume(dev, state, ctx); 3669 if (ret) 3670 DRM_ERROR("Restoring old state failed with %i\n", ret); 3671 3672 intel_hpd_init(dev_priv); 3673 } 3674 3675 drm_atomic_state_put(state); 3676 unlock: 3677 drm_modeset_drop_locks(ctx); 3678 drm_modeset_acquire_fini(ctx); 3679 mutex_unlock(&dev->mode_config.mutex); 3680 3681 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags); 3682 } 3683 3684 static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, 3685 const struct intel_crtc_state *new_crtc_state) 3686 { 3687 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); 3688 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3689 3690 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3691 crtc->base.mode = new_crtc_state->base.mode; 3692 3693 /* 3694 * Update pipe size and adjust fitter if needed: the reason for this is 3695 * that in compute_mode_changes we check the native mode (not the pfit 3696 * mode) to see if we can flip rather than do a full mode set. In the 3697 * fastboot case, we'll flip, but if we don't update the pipesrc and 3698 * pfit state, we'll end up with a big fb scanned out into the wrong 3699 * sized surface. 3700 */ 3701 3702 I915_WRITE(PIPESRC(crtc->pipe), 3703 ((new_crtc_state->pipe_src_w - 1) << 16) | 3704 (new_crtc_state->pipe_src_h - 1)); 3705 3706 /* on skylake this is done by detaching scalers */ 3707 if (INTEL_GEN(dev_priv) >= 9) { 3708 skl_detach_scalers(crtc); 3709 3710 if (new_crtc_state->pch_pfit.enabled) 3711 skylake_pfit_enable(crtc); 3712 } else if (HAS_PCH_SPLIT(dev_priv)) { 3713 if (new_crtc_state->pch_pfit.enabled) 3714 ironlake_pfit_enable(crtc); 3715 else if (old_crtc_state->pch_pfit.enabled) 3716 ironlake_pfit_disable(crtc, true); 3717 } 3718 } 3719 3720 static void intel_fdi_normal_train(struct intel_crtc *crtc) 3721 { 3722 struct drm_device *dev = crtc->base.dev; 3723 struct drm_i915_private *dev_priv = to_i915(dev); 3724 int pipe = crtc->pipe; 3725 i915_reg_t reg; 3726 u32 temp; 3727 3728 /* enable normal train */ 3729 reg = FDI_TX_CTL(pipe); 3730 temp = I915_READ(reg); 3731 if (IS_IVYBRIDGE(dev_priv)) { 3732 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3733 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3734 } else { 3735 temp &= ~FDI_LINK_TRAIN_NONE; 3736 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3737 } 3738 I915_WRITE(reg, temp); 3739 3740 reg = FDI_RX_CTL(pipe); 3741 temp = I915_READ(reg); 3742 if (HAS_PCH_CPT(dev_priv)) { 3743 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3744 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3745 } else { 3746 temp &= ~FDI_LINK_TRAIN_NONE; 3747 temp |= FDI_LINK_TRAIN_NONE; 3748 } 3749 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3750 3751 /* wait one idle pattern time */ 3752 POSTING_READ(reg); 3753 udelay(1000); 3754 3755 /* IVB wants error correction enabled */ 3756 if (IS_IVYBRIDGE(dev_priv)) 3757 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3758 FDI_FE_ERRC_ENABLE); 3759 } 3760 3761 /* The FDI link training functions for ILK/Ibexpeak. */ 3762 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 3763 const struct intel_crtc_state *crtc_state) 3764 { 3765 struct drm_device *dev = crtc->base.dev; 3766 struct drm_i915_private *dev_priv = to_i915(dev); 3767 int pipe = crtc->pipe; 3768 i915_reg_t reg; 3769 u32 temp, tries; 3770 3771 /* FDI needs bits from pipe first */ 3772 assert_pipe_enabled(dev_priv, pipe); 3773 3774 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3775 for train result */ 3776 reg = FDI_RX_IMR(pipe); 3777 temp = I915_READ(reg); 3778 temp &= ~FDI_RX_SYMBOL_LOCK; 3779 temp &= ~FDI_RX_BIT_LOCK; 3780 I915_WRITE(reg, temp); 3781 I915_READ(reg); 3782 udelay(150); 3783 3784 /* enable CPU FDI TX and PCH FDI RX */ 3785 reg = FDI_TX_CTL(pipe); 3786 temp = I915_READ(reg); 3787 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3788 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3789 temp &= ~FDI_LINK_TRAIN_NONE; 3790 temp |= FDI_LINK_TRAIN_PATTERN_1; 3791 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3792 3793 reg = FDI_RX_CTL(pipe); 3794 temp = I915_READ(reg); 3795 temp &= ~FDI_LINK_TRAIN_NONE; 3796 temp |= FDI_LINK_TRAIN_PATTERN_1; 3797 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3798 3799 POSTING_READ(reg); 3800 udelay(150); 3801 3802 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3803 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3804 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 3805 FDI_RX_PHASE_SYNC_POINTER_EN); 3806 3807 reg = FDI_RX_IIR(pipe); 3808 for (tries = 0; tries < 5; tries++) { 3809 temp = I915_READ(reg); 3810 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3811 3812 if ((temp & FDI_RX_BIT_LOCK)) { 3813 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3814 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3815 break; 3816 } 3817 } 3818 if (tries == 5) 3819 DRM_ERROR("FDI train 1 fail!\n"); 3820 3821 /* Train 2 */ 3822 reg = FDI_TX_CTL(pipe); 3823 temp = I915_READ(reg); 3824 temp &= ~FDI_LINK_TRAIN_NONE; 3825 temp |= FDI_LINK_TRAIN_PATTERN_2; 3826 I915_WRITE(reg, temp); 3827 3828 reg = FDI_RX_CTL(pipe); 3829 temp = I915_READ(reg); 3830 temp &= ~FDI_LINK_TRAIN_NONE; 3831 temp |= FDI_LINK_TRAIN_PATTERN_2; 3832 I915_WRITE(reg, temp); 3833 3834 POSTING_READ(reg); 3835 udelay(150); 3836 3837 reg = FDI_RX_IIR(pipe); 3838 for (tries = 0; tries < 5; tries++) { 3839 temp = I915_READ(reg); 3840 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3841 3842 if (temp & FDI_RX_SYMBOL_LOCK) { 3843 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3844 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3845 break; 3846 } 3847 } 3848 if (tries == 5) 3849 DRM_ERROR("FDI train 2 fail!\n"); 3850 3851 DRM_DEBUG_KMS("FDI train done\n"); 3852 3853 } 3854 3855 static const int snb_b_fdi_train_param[] = { 3856 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3857 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3858 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3859 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3860 }; 3861 3862 /* The FDI link training functions for SNB/Cougarpoint. */ 3863 static void gen6_fdi_link_train(struct intel_crtc *crtc, 3864 const struct intel_crtc_state *crtc_state) 3865 { 3866 struct drm_device *dev = crtc->base.dev; 3867 struct drm_i915_private *dev_priv = to_i915(dev); 3868 int pipe = crtc->pipe; 3869 i915_reg_t reg; 3870 u32 temp, i, retry; 3871 3872 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3873 for train result */ 3874 reg = FDI_RX_IMR(pipe); 3875 temp = I915_READ(reg); 3876 temp &= ~FDI_RX_SYMBOL_LOCK; 3877 temp &= ~FDI_RX_BIT_LOCK; 3878 I915_WRITE(reg, temp); 3879 3880 POSTING_READ(reg); 3881 udelay(150); 3882 3883 /* enable CPU FDI TX and PCH FDI RX */ 3884 reg = FDI_TX_CTL(pipe); 3885 temp = I915_READ(reg); 3886 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3887 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3888 temp &= ~FDI_LINK_TRAIN_NONE; 3889 temp |= FDI_LINK_TRAIN_PATTERN_1; 3890 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3891 /* SNB-B */ 3892 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3893 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3894 3895 I915_WRITE(FDI_RX_MISC(pipe), 3896 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3897 3898 reg = FDI_RX_CTL(pipe); 3899 temp = I915_READ(reg); 3900 if (HAS_PCH_CPT(dev_priv)) { 3901 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3902 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3903 } else { 3904 temp &= ~FDI_LINK_TRAIN_NONE; 3905 temp |= FDI_LINK_TRAIN_PATTERN_1; 3906 } 3907 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3908 3909 POSTING_READ(reg); 3910 udelay(150); 3911 3912 for (i = 0; i < 4; i++) { 3913 reg = FDI_TX_CTL(pipe); 3914 temp = I915_READ(reg); 3915 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3916 temp |= snb_b_fdi_train_param[i]; 3917 I915_WRITE(reg, temp); 3918 3919 POSTING_READ(reg); 3920 udelay(500); 3921 3922 for (retry = 0; retry < 5; retry++) { 3923 reg = FDI_RX_IIR(pipe); 3924 temp = I915_READ(reg); 3925 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3926 if (temp & FDI_RX_BIT_LOCK) { 3927 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3928 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3929 break; 3930 } 3931 udelay(50); 3932 } 3933 if (retry < 5) 3934 break; 3935 } 3936 if (i == 4) 3937 DRM_ERROR("FDI train 1 fail!\n"); 3938 3939 /* Train 2 */ 3940 reg = FDI_TX_CTL(pipe); 3941 temp = I915_READ(reg); 3942 temp &= ~FDI_LINK_TRAIN_NONE; 3943 temp |= FDI_LINK_TRAIN_PATTERN_2; 3944 if (IS_GEN6(dev_priv)) { 3945 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3946 /* SNB-B */ 3947 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3948 } 3949 I915_WRITE(reg, temp); 3950 3951 reg = FDI_RX_CTL(pipe); 3952 temp = I915_READ(reg); 3953 if (HAS_PCH_CPT(dev_priv)) { 3954 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3955 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3956 } else { 3957 temp &= ~FDI_LINK_TRAIN_NONE; 3958 temp |= FDI_LINK_TRAIN_PATTERN_2; 3959 } 3960 I915_WRITE(reg, temp); 3961 3962 POSTING_READ(reg); 3963 udelay(150); 3964 3965 for (i = 0; i < 4; i++) { 3966 reg = FDI_TX_CTL(pipe); 3967 temp = I915_READ(reg); 3968 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3969 temp |= snb_b_fdi_train_param[i]; 3970 I915_WRITE(reg, temp); 3971 3972 POSTING_READ(reg); 3973 udelay(500); 3974 3975 for (retry = 0; retry < 5; retry++) { 3976 reg = FDI_RX_IIR(pipe); 3977 temp = I915_READ(reg); 3978 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3979 if (temp & FDI_RX_SYMBOL_LOCK) { 3980 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3981 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3982 break; 3983 } 3984 udelay(50); 3985 } 3986 if (retry < 5) 3987 break; 3988 } 3989 if (i == 4) 3990 DRM_ERROR("FDI train 2 fail!\n"); 3991 3992 DRM_DEBUG_KMS("FDI train done.\n"); 3993 } 3994 3995 /* Manual link training for Ivy Bridge A0 parts */ 3996 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 3997 const struct intel_crtc_state *crtc_state) 3998 { 3999 struct drm_device *dev = crtc->base.dev; 4000 struct drm_i915_private *dev_priv = to_i915(dev); 4001 int pipe = crtc->pipe; 4002 i915_reg_t reg; 4003 u32 temp, i, j; 4004 4005 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 4006 for train result */ 4007 reg = FDI_RX_IMR(pipe); 4008 temp = I915_READ(reg); 4009 temp &= ~FDI_RX_SYMBOL_LOCK; 4010 temp &= ~FDI_RX_BIT_LOCK; 4011 I915_WRITE(reg, temp); 4012 4013 POSTING_READ(reg); 4014 udelay(150); 4015 4016 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 4017 I915_READ(FDI_RX_IIR(pipe))); 4018 4019 /* Try each vswing and preemphasis setting twice before moving on */ 4020 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 4021 /* disable first in case we need to retry */ 4022 reg = FDI_TX_CTL(pipe); 4023 temp = I915_READ(reg); 4024 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 4025 temp &= ~FDI_TX_ENABLE; 4026 I915_WRITE(reg, temp); 4027 4028 reg = FDI_RX_CTL(pipe); 4029 temp = I915_READ(reg); 4030 temp &= ~FDI_LINK_TRAIN_AUTO; 4031 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4032 temp &= ~FDI_RX_ENABLE; 4033 I915_WRITE(reg, temp); 4034 4035 /* enable CPU FDI TX and PCH FDI RX */ 4036 reg = FDI_TX_CTL(pipe); 4037 temp = I915_READ(reg); 4038 temp &= ~FDI_DP_PORT_WIDTH_MASK; 4039 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 4040 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 4041 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 4042 temp |= snb_b_fdi_train_param[j/2]; 4043 temp |= FDI_COMPOSITE_SYNC; 4044 I915_WRITE(reg, temp | FDI_TX_ENABLE); 4045 4046 I915_WRITE(FDI_RX_MISC(pipe), 4047 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 4048 4049 reg = FDI_RX_CTL(pipe); 4050 temp = I915_READ(reg); 4051 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4052 temp |= FDI_COMPOSITE_SYNC; 4053 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4054 4055 POSTING_READ(reg); 4056 udelay(1); /* should be 0.5us */ 4057 4058 for (i = 0; i < 4; i++) { 4059 reg = FDI_RX_IIR(pipe); 4060 temp = I915_READ(reg); 4061 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4062 4063 if (temp & FDI_RX_BIT_LOCK || 4064 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4065 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4066 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4067 i); 4068 break; 4069 } 4070 udelay(1); /* should be 0.5us */ 4071 } 4072 if (i == 4) { 4073 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4074 continue; 4075 } 4076 4077 /* Train 2 */ 4078 reg = FDI_TX_CTL(pipe); 4079 temp = I915_READ(reg); 4080 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4081 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4082 I915_WRITE(reg, temp); 4083 4084 reg = FDI_RX_CTL(pipe); 4085 temp = I915_READ(reg); 4086 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4087 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4088 I915_WRITE(reg, temp); 4089 4090 POSTING_READ(reg); 4091 udelay(2); /* should be 1.5us */ 4092 4093 for (i = 0; i < 4; i++) { 4094 reg = FDI_RX_IIR(pipe); 4095 temp = I915_READ(reg); 4096 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4097 4098 if (temp & FDI_RX_SYMBOL_LOCK || 4099 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4100 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4101 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4102 i); 4103 goto train_done; 4104 } 4105 udelay(2); /* should be 1.5us */ 4106 } 4107 if (i == 4) 4108 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4109 } 4110 4111 train_done: 4112 DRM_DEBUG_KMS("FDI train done.\n"); 4113 } 4114 4115 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 4116 { 4117 struct drm_device *dev = intel_crtc->base.dev; 4118 struct drm_i915_private *dev_priv = to_i915(dev); 4119 int pipe = intel_crtc->pipe; 4120 i915_reg_t reg; 4121 u32 temp; 4122 4123 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4124 reg = FDI_RX_CTL(pipe); 4125 temp = I915_READ(reg); 4126 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4127 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 4128 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4129 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4130 4131 POSTING_READ(reg); 4132 udelay(200); 4133 4134 /* Switch from Rawclk to PCDclk */ 4135 temp = I915_READ(reg); 4136 I915_WRITE(reg, temp | FDI_PCDCLK); 4137 4138 POSTING_READ(reg); 4139 udelay(200); 4140 4141 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4142 reg = FDI_TX_CTL(pipe); 4143 temp = I915_READ(reg); 4144 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4145 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4146 4147 POSTING_READ(reg); 4148 udelay(100); 4149 } 4150 } 4151 4152 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4153 { 4154 struct drm_device *dev = intel_crtc->base.dev; 4155 struct drm_i915_private *dev_priv = to_i915(dev); 4156 int pipe = intel_crtc->pipe; 4157 i915_reg_t reg; 4158 u32 temp; 4159 4160 /* Switch from PCDclk to Rawclk */ 4161 reg = FDI_RX_CTL(pipe); 4162 temp = I915_READ(reg); 4163 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4164 4165 /* Disable CPU FDI TX PLL */ 4166 reg = FDI_TX_CTL(pipe); 4167 temp = I915_READ(reg); 4168 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4169 4170 POSTING_READ(reg); 4171 udelay(100); 4172 4173 reg = FDI_RX_CTL(pipe); 4174 temp = I915_READ(reg); 4175 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4176 4177 /* Wait for the clocks to turn off. */ 4178 POSTING_READ(reg); 4179 udelay(100); 4180 } 4181 4182 static void ironlake_fdi_disable(struct drm_crtc *crtc) 4183 { 4184 struct drm_device *dev = crtc->dev; 4185 struct drm_i915_private *dev_priv = to_i915(dev); 4186 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4187 int pipe = intel_crtc->pipe; 4188 i915_reg_t reg; 4189 u32 temp; 4190 4191 /* disable CPU FDI tx and PCH FDI rx */ 4192 reg = FDI_TX_CTL(pipe); 4193 temp = I915_READ(reg); 4194 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 4195 POSTING_READ(reg); 4196 4197 reg = FDI_RX_CTL(pipe); 4198 temp = I915_READ(reg); 4199 temp &= ~(0x7 << 16); 4200 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4201 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 4202 4203 POSTING_READ(reg); 4204 udelay(100); 4205 4206 /* Ironlake workaround, disable clock pointer after downing FDI */ 4207 if (HAS_PCH_IBX(dev_priv)) 4208 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4209 4210 /* still set train pattern 1 */ 4211 reg = FDI_TX_CTL(pipe); 4212 temp = I915_READ(reg); 4213 temp &= ~FDI_LINK_TRAIN_NONE; 4214 temp |= FDI_LINK_TRAIN_PATTERN_1; 4215 I915_WRITE(reg, temp); 4216 4217 reg = FDI_RX_CTL(pipe); 4218 temp = I915_READ(reg); 4219 if (HAS_PCH_CPT(dev_priv)) { 4220 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4221 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4222 } else { 4223 temp &= ~FDI_LINK_TRAIN_NONE; 4224 temp |= FDI_LINK_TRAIN_PATTERN_1; 4225 } 4226 /* BPC in FDI rx is consistent with that in PIPECONF */ 4227 temp &= ~(0x07 << 16); 4228 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4229 I915_WRITE(reg, temp); 4230 4231 POSTING_READ(reg); 4232 udelay(100); 4233 } 4234 4235 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 4236 { 4237 struct drm_crtc *crtc; 4238 bool cleanup_done; 4239 4240 drm_for_each_crtc(crtc, &dev_priv->drm) { 4241 struct drm_crtc_commit *commit; 4242 lockmgr(&crtc->commit_lock, LK_EXCLUSIVE); 4243 commit = list_first_entry_or_null(&crtc->commit_list, 4244 struct drm_crtc_commit, commit_entry); 4245 cleanup_done = commit ? 4246 try_wait_for_completion(&commit->cleanup_done) : true; 4247 lockmgr(&crtc->commit_lock, LK_RELEASE); 4248 4249 if (cleanup_done) 4250 continue; 4251 4252 drm_crtc_wait_one_vblank(crtc); 4253 4254 return true; 4255 } 4256 4257 return false; 4258 } 4259 4260 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 4261 { 4262 u32 temp; 4263 4264 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 4265 4266 mutex_lock(&dev_priv->sb_lock); 4267 4268 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4269 temp |= SBI_SSCCTL_DISABLE; 4270 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4271 4272 mutex_unlock(&dev_priv->sb_lock); 4273 } 4274 4275 /* Program iCLKIP clock to the desired frequency */ 4276 static void lpt_program_iclkip(struct intel_crtc *crtc) 4277 { 4278 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4279 int clock = crtc->config->base.adjusted_mode.crtc_clock; 4280 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4281 u32 temp; 4282 4283 lpt_disable_iclkip(dev_priv); 4284 4285 /* The iCLK virtual clock root frequency is in MHz, 4286 * but the adjusted_mode->crtc_clock in in KHz. To get the 4287 * divisors, it is necessary to divide one by another, so we 4288 * convert the virtual clock precision to KHz here for higher 4289 * precision. 4290 */ 4291 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 4292 u32 iclk_virtual_root_freq = 172800 * 1000; 4293 u32 iclk_pi_range = 64; 4294 u32 desired_divisor; 4295 4296 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 4297 clock << auxdiv); 4298 divsel = (desired_divisor / iclk_pi_range) - 2; 4299 phaseinc = desired_divisor % iclk_pi_range; 4300 4301 /* 4302 * Near 20MHz is a corner case which is 4303 * out of range for the 7-bit divisor 4304 */ 4305 if (divsel <= 0x7f) 4306 break; 4307 } 4308 4309 /* This should not happen with any sane values */ 4310 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 4311 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 4312 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 4313 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 4314 4315 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 4316 clock, 4317 auxdiv, 4318 divsel, 4319 phasedir, 4320 phaseinc); 4321 4322 mutex_lock(&dev_priv->sb_lock); 4323 4324 /* Program SSCDIVINTPHASE6 */ 4325 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 4326 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 4327 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 4328 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 4329 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 4330 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 4331 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 4332 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 4333 4334 /* Program SSCAUXDIV */ 4335 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 4336 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 4337 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 4338 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 4339 4340 /* Enable modulator and associated divider */ 4341 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4342 temp &= ~SBI_SSCCTL_DISABLE; 4343 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4344 4345 mutex_unlock(&dev_priv->sb_lock); 4346 4347 /* Wait for initialization time */ 4348 udelay(24); 4349 4350 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 4351 } 4352 4353 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 4354 { 4355 u32 divsel, phaseinc, auxdiv; 4356 u32 iclk_virtual_root_freq = 172800 * 1000; 4357 u32 iclk_pi_range = 64; 4358 u32 desired_divisor; 4359 u32 temp; 4360 4361 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 4362 return 0; 4363 4364 mutex_lock(&dev_priv->sb_lock); 4365 4366 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4367 if (temp & SBI_SSCCTL_DISABLE) { 4368 mutex_unlock(&dev_priv->sb_lock); 4369 return 0; 4370 } 4371 4372 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 4373 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 4374 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 4375 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 4376 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 4377 4378 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 4379 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 4380 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 4381 4382 mutex_unlock(&dev_priv->sb_lock); 4383 4384 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 4385 4386 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 4387 desired_divisor << auxdiv); 4388 } 4389 4390 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4391 enum i915_pipe pch_transcoder) 4392 { 4393 struct drm_device *dev = crtc->base.dev; 4394 struct drm_i915_private *dev_priv = to_i915(dev); 4395 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4396 4397 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4398 I915_READ(HTOTAL(cpu_transcoder))); 4399 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 4400 I915_READ(HBLANK(cpu_transcoder))); 4401 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 4402 I915_READ(HSYNC(cpu_transcoder))); 4403 4404 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 4405 I915_READ(VTOTAL(cpu_transcoder))); 4406 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 4407 I915_READ(VBLANK(cpu_transcoder))); 4408 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 4409 I915_READ(VSYNC(cpu_transcoder))); 4410 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 4411 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4412 } 4413 4414 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4415 { 4416 struct drm_i915_private *dev_priv = to_i915(dev); 4417 uint32_t temp; 4418 4419 temp = I915_READ(SOUTH_CHICKEN1); 4420 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 4421 return; 4422 4423 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 4424 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 4425 4426 temp &= ~FDI_BC_BIFURCATION_SELECT; 4427 if (enable) 4428 temp |= FDI_BC_BIFURCATION_SELECT; 4429 4430 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 4431 I915_WRITE(SOUTH_CHICKEN1, temp); 4432 POSTING_READ(SOUTH_CHICKEN1); 4433 } 4434 4435 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4436 { 4437 struct drm_device *dev = intel_crtc->base.dev; 4438 4439 switch (intel_crtc->pipe) { 4440 case PIPE_A: 4441 break; 4442 case PIPE_B: 4443 if (intel_crtc->config->fdi_lanes > 2) 4444 cpt_set_fdi_bc_bifurcation(dev, false); 4445 else 4446 cpt_set_fdi_bc_bifurcation(dev, true); 4447 4448 break; 4449 case PIPE_C: 4450 cpt_set_fdi_bc_bifurcation(dev, true); 4451 4452 break; 4453 default: 4454 BUG(); 4455 } 4456 } 4457 4458 /* Return which DP Port should be selected for Transcoder DP control */ 4459 static enum port 4460 intel_trans_dp_port_sel(struct intel_crtc *crtc) 4461 { 4462 struct drm_device *dev = crtc->base.dev; 4463 struct intel_encoder *encoder; 4464 4465 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 4466 if (encoder->type == INTEL_OUTPUT_DP || 4467 encoder->type == INTEL_OUTPUT_EDP) 4468 return enc_to_dig_port(&encoder->base)->port; 4469 } 4470 4471 return -1; 4472 } 4473 4474 /* 4475 * Enable PCH resources required for PCH ports: 4476 * - PCH PLLs 4477 * - FDI training & RX/TX 4478 * - update transcoder timings 4479 * - DP transcoding bits 4480 * - transcoder 4481 */ 4482 static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) 4483 { 4484 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4485 struct drm_device *dev = crtc->base.dev; 4486 struct drm_i915_private *dev_priv = to_i915(dev); 4487 int pipe = crtc->pipe; 4488 u32 temp; 4489 4490 assert_pch_transcoder_disabled(dev_priv, pipe); 4491 4492 if (IS_IVYBRIDGE(dev_priv)) 4493 ivybridge_update_fdi_bc_bifurcation(crtc); 4494 4495 /* Write the TU size bits before fdi link training, so that error 4496 * detection works. */ 4497 I915_WRITE(FDI_RX_TUSIZE1(pipe), 4498 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4499 4500 /* For PCH output, training FDI link */ 4501 dev_priv->display.fdi_link_train(crtc, crtc_state); 4502 4503 /* We need to program the right clock selection before writing the pixel 4504 * mutliplier into the DPLL. */ 4505 if (HAS_PCH_CPT(dev_priv)) { 4506 u32 sel; 4507 4508 temp = I915_READ(PCH_DPLL_SEL); 4509 temp |= TRANS_DPLL_ENABLE(pipe); 4510 sel = TRANS_DPLLB_SEL(pipe); 4511 if (crtc_state->shared_dpll == 4512 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 4513 temp |= sel; 4514 else 4515 temp &= ~sel; 4516 I915_WRITE(PCH_DPLL_SEL, temp); 4517 } 4518 4519 /* XXX: pch pll's can be enabled any time before we enable the PCH 4520 * transcoder, and we actually should do this to not upset any PCH 4521 * transcoder that already use the clock when we share it. 4522 * 4523 * Note that enable_shared_dpll tries to do the right thing, but 4524 * get_shared_dpll unconditionally resets the pll - we need that to have 4525 * the right LVDS enable sequence. */ 4526 intel_enable_shared_dpll(crtc); 4527 4528 /* set transcoder timing, panel must allow it */ 4529 assert_panel_unlocked(dev_priv, pipe); 4530 ironlake_pch_transcoder_set_timings(crtc, pipe); 4531 4532 intel_fdi_normal_train(crtc); 4533 4534 /* For PCH DP, enable TRANS_DP_CTL */ 4535 if (HAS_PCH_CPT(dev_priv) && 4536 intel_crtc_has_dp_encoder(crtc_state)) { 4537 const struct drm_display_mode *adjusted_mode = 4538 &crtc_state->base.adjusted_mode; 4539 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4540 i915_reg_t reg = TRANS_DP_CTL(pipe); 4541 temp = I915_READ(reg); 4542 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4543 TRANS_DP_SYNC_MASK | 4544 TRANS_DP_BPC_MASK); 4545 temp |= TRANS_DP_OUTPUT_ENABLE; 4546 temp |= bpc << 9; /* same format but at 11:9 */ 4547 4548 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 4549 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4550 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 4551 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4552 4553 switch (intel_trans_dp_port_sel(crtc)) { 4554 case PORT_B: 4555 temp |= TRANS_DP_PORT_SEL_B; 4556 break; 4557 case PORT_C: 4558 temp |= TRANS_DP_PORT_SEL_C; 4559 break; 4560 case PORT_D: 4561 temp |= TRANS_DP_PORT_SEL_D; 4562 break; 4563 default: 4564 BUG(); 4565 } 4566 4567 I915_WRITE(reg, temp); 4568 } 4569 4570 ironlake_enable_pch_transcoder(dev_priv, pipe); 4571 } 4572 4573 static void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 4574 { 4575 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4576 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4577 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 4578 4579 assert_pch_transcoder_disabled(dev_priv, PIPE_A); 4580 4581 lpt_program_iclkip(crtc); 4582 4583 /* Set transcoder timing. */ 4584 ironlake_pch_transcoder_set_timings(crtc, PIPE_A); 4585 4586 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4587 } 4588 4589 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4590 { 4591 struct drm_i915_private *dev_priv = to_i915(dev); 4592 i915_reg_t dslreg = PIPEDSL(pipe); 4593 u32 temp; 4594 4595 temp = I915_READ(dslreg); 4596 udelay(500); 4597 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4598 if (wait_for(I915_READ(dslreg) != temp, 5)) 4599 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4600 } 4601 } 4602 4603 static int 4604 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4605 unsigned int scaler_user, int *scaler_id, 4606 int src_w, int src_h, int dst_w, int dst_h) 4607 { 4608 struct intel_crtc_scaler_state *scaler_state = 4609 &crtc_state->scaler_state; 4610 struct intel_crtc *intel_crtc = 4611 to_intel_crtc(crtc_state->base.crtc); 4612 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4613 const struct drm_display_mode *adjusted_mode = 4614 &crtc_state->base.adjusted_mode; 4615 int need_scaling; 4616 4617 /* 4618 * Src coordinates are already rotated by 270 degrees for 4619 * the 90/270 degree plane rotation cases (to match the 4620 * GTT mapping), hence no need to account for rotation here. 4621 */ 4622 need_scaling = src_w != dst_w || src_h != dst_h; 4623 4624 if (crtc_state->ycbcr420 && scaler_user == SKL_CRTC_INDEX) 4625 need_scaling = true; 4626 4627 /* 4628 * Scaling/fitting not supported in IF-ID mode in GEN9+ 4629 * TODO: Interlace fetch mode doesn't support YUV420 planar formats. 4630 * Once NV12 is enabled, handle it here while allocating scaler 4631 * for NV12. 4632 */ 4633 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable && 4634 need_scaling && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 4635 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n"); 4636 return -EINVAL; 4637 } 4638 4639 /* 4640 * if plane is being disabled or scaler is no more required or force detach 4641 * - free scaler binded to this plane/crtc 4642 * - in order to do this, update crtc->scaler_usage 4643 * 4644 * Here scaler state in crtc_state is set free so that 4645 * scaler can be assigned to other user. Actual register 4646 * update to free the scaler is done in plane/panel-fit programming. 4647 * For this purpose crtc/plane_state->scaler_id isn't reset here. 4648 */ 4649 if (force_detach || !need_scaling) { 4650 if (*scaler_id >= 0) { 4651 scaler_state->scaler_users &= ~(1 << scaler_user); 4652 scaler_state->scalers[*scaler_id].in_use = 0; 4653 4654 DRM_DEBUG_KMS("scaler_user index %u.%u: " 4655 "Staged freeing scaler id %d scaler_users = 0x%x\n", 4656 intel_crtc->pipe, scaler_user, *scaler_id, 4657 scaler_state->scaler_users); 4658 *scaler_id = -1; 4659 } 4660 return 0; 4661 } 4662 4663 /* range checks */ 4664 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 4665 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 4666 4667 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 4668 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { 4669 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u " 4670 "size is out of scaler range\n", 4671 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h); 4672 return -EINVAL; 4673 } 4674 4675 /* mark this plane as a scaler user in crtc_state */ 4676 scaler_state->scaler_users |= (1 << scaler_user); 4677 DRM_DEBUG_KMS("scaler_user index %u.%u: " 4678 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n", 4679 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h, 4680 scaler_state->scaler_users); 4681 4682 return 0; 4683 } 4684 4685 /** 4686 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc. 4687 * 4688 * @state: crtc's scaler state 4689 * 4690 * Return 4691 * 0 - scaler_usage updated successfully 4692 * error - requested scaling cannot be supported or other error condition 4693 */ 4694 int skl_update_scaler_crtc(struct intel_crtc_state *state) 4695 { 4696 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4697 4698 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4699 &state->scaler_state.scaler_id, 4700 state->pipe_src_w, state->pipe_src_h, 4701 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4702 } 4703 4704 /** 4705 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4706 * 4707 * @state: crtc's scaler state 4708 * @plane_state: atomic plane state to update 4709 * 4710 * Return 4711 * 0 - scaler_usage updated successfully 4712 * error - requested scaling cannot be supported or other error condition 4713 */ 4714 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4715 struct intel_plane_state *plane_state) 4716 { 4717 4718 struct intel_plane *intel_plane = 4719 to_intel_plane(plane_state->base.plane); 4720 struct drm_framebuffer *fb = plane_state->base.fb; 4721 int ret; 4722 4723 bool force_detach = !fb || !plane_state->base.visible; 4724 4725 ret = skl_update_scaler(crtc_state, force_detach, 4726 drm_plane_index(&intel_plane->base), 4727 &plane_state->scaler_id, 4728 drm_rect_width(&plane_state->base.src) >> 16, 4729 drm_rect_height(&plane_state->base.src) >> 16, 4730 drm_rect_width(&plane_state->base.dst), 4731 drm_rect_height(&plane_state->base.dst)); 4732 4733 if (ret || plane_state->scaler_id < 0) 4734 return ret; 4735 4736 /* check colorkey */ 4737 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4738 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 4739 intel_plane->base.base.id, 4740 intel_plane->base.name); 4741 return -EINVAL; 4742 } 4743 4744 /* Check src format */ 4745 switch (fb->format->format) { 4746 case DRM_FORMAT_RGB565: 4747 case DRM_FORMAT_XBGR8888: 4748 case DRM_FORMAT_XRGB8888: 4749 case DRM_FORMAT_ABGR8888: 4750 case DRM_FORMAT_ARGB8888: 4751 case DRM_FORMAT_XRGB2101010: 4752 case DRM_FORMAT_XBGR2101010: 4753 case DRM_FORMAT_YUYV: 4754 case DRM_FORMAT_YVYU: 4755 case DRM_FORMAT_UYVY: 4756 case DRM_FORMAT_VYUY: 4757 break; 4758 default: 4759 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 4760 intel_plane->base.base.id, intel_plane->base.name, 4761 fb->base.id, fb->format->format); 4762 return -EINVAL; 4763 } 4764 4765 return 0; 4766 } 4767 4768 static void skylake_scaler_disable(struct intel_crtc *crtc) 4769 { 4770 int i; 4771 4772 for (i = 0; i < crtc->num_scalers; i++) 4773 skl_detach_scaler(crtc, i); 4774 } 4775 4776 static void skylake_pfit_enable(struct intel_crtc *crtc) 4777 { 4778 struct drm_device *dev = crtc->base.dev; 4779 struct drm_i915_private *dev_priv = to_i915(dev); 4780 int pipe = crtc->pipe; 4781 struct intel_crtc_scaler_state *scaler_state = 4782 &crtc->config->scaler_state; 4783 4784 if (crtc->config->pch_pfit.enabled) { 4785 int id; 4786 4787 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 4788 return; 4789 4790 id = scaler_state->scaler_id; 4791 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4792 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4793 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4794 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4795 } 4796 } 4797 4798 static void ironlake_pfit_enable(struct intel_crtc *crtc) 4799 { 4800 struct drm_device *dev = crtc->base.dev; 4801 struct drm_i915_private *dev_priv = to_i915(dev); 4802 int pipe = crtc->pipe; 4803 4804 if (crtc->config->pch_pfit.enabled) { 4805 /* Force use of hard-coded filter coefficients 4806 * as some pre-programmed values are broken, 4807 * e.g. x201. 4808 */ 4809 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 4810 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4811 PF_PIPE_SEL_IVB(pipe)); 4812 else 4813 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4814 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4815 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4816 } 4817 } 4818 4819 void hsw_enable_ips(struct intel_crtc *crtc) 4820 { 4821 struct drm_device *dev = crtc->base.dev; 4822 struct drm_i915_private *dev_priv = to_i915(dev); 4823 4824 if (!crtc->config->ips_enabled) 4825 return; 4826 4827 /* 4828 * We can only enable IPS after we enable a plane and wait for a vblank 4829 * This function is called from post_plane_update, which is run after 4830 * a vblank wait. 4831 */ 4832 4833 assert_plane_enabled(to_intel_plane(crtc->base.primary)); 4834 4835 if (IS_BROADWELL(dev_priv)) { 4836 mutex_lock(&dev_priv->pcu_lock); 4837 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 4838 IPS_ENABLE | IPS_PCODE_CONTROL)); 4839 mutex_unlock(&dev_priv->pcu_lock); 4840 /* Quoting Art Runyan: "its not safe to expect any particular 4841 * value in IPS_CTL bit 31 after enabling IPS through the 4842 * mailbox." Moreover, the mailbox may return a bogus state, 4843 * so we need to just enable it and continue on. 4844 */ 4845 } else { 4846 I915_WRITE(IPS_CTL, IPS_ENABLE); 4847 /* The bit only becomes 1 in the next vblank, so this wait here 4848 * is essentially intel_wait_for_vblank. If we don't have this 4849 * and don't wait for vblanks until the end of crtc_enable, then 4850 * the HW state readout code will complain that the expected 4851 * IPS_CTL value is not the one we read. */ 4852 if (intel_wait_for_register(dev_priv, 4853 IPS_CTL, IPS_ENABLE, IPS_ENABLE, 4854 50)) 4855 DRM_ERROR("Timed out waiting for IPS enable\n"); 4856 } 4857 } 4858 4859 void hsw_disable_ips(struct intel_crtc *crtc) 4860 { 4861 struct drm_device *dev = crtc->base.dev; 4862 struct drm_i915_private *dev_priv = to_i915(dev); 4863 4864 if (!crtc->config->ips_enabled) 4865 return; 4866 4867 assert_plane_enabled(to_intel_plane(crtc->base.primary)); 4868 4869 if (IS_BROADWELL(dev_priv)) { 4870 mutex_lock(&dev_priv->pcu_lock); 4871 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4872 mutex_unlock(&dev_priv->pcu_lock); 4873 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4874 if (intel_wait_for_register(dev_priv, 4875 IPS_CTL, IPS_ENABLE, 0, 4876 42)) 4877 DRM_ERROR("Timed out waiting for IPS disable\n"); 4878 } else { 4879 I915_WRITE(IPS_CTL, 0); 4880 POSTING_READ(IPS_CTL); 4881 } 4882 4883 /* We need to wait for a vblank before we can disable the plane. */ 4884 intel_wait_for_vblank(dev_priv, crtc->pipe); 4885 } 4886 4887 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4888 { 4889 if (intel_crtc->overlay) { 4890 struct drm_device *dev = intel_crtc->base.dev; 4891 4892 mutex_lock(&dev->struct_mutex); 4893 (void) intel_overlay_switch_off(intel_crtc->overlay); 4894 mutex_unlock(&dev->struct_mutex); 4895 } 4896 4897 /* Let userspace switch the overlay on again. In most cases userspace 4898 * has to recompute where to put it anyway. 4899 */ 4900 } 4901 4902 /** 4903 * intel_post_enable_primary - Perform operations after enabling primary plane 4904 * @crtc: the CRTC whose primary plane was just enabled 4905 * 4906 * Performs potentially sleeping operations that must be done after the primary 4907 * plane is enabled, such as updating FBC and IPS. Note that this may be 4908 * called due to an explicit primary plane update, or due to an implicit 4909 * re-enable that is caused when a sprite plane is updated to no longer 4910 * completely hide the primary plane. 4911 */ 4912 static void 4913 intel_post_enable_primary(struct drm_crtc *crtc) 4914 { 4915 struct drm_device *dev = crtc->dev; 4916 struct drm_i915_private *dev_priv = to_i915(dev); 4917 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4918 int pipe = intel_crtc->pipe; 4919 4920 /* 4921 * FIXME IPS should be fine as long as one plane is 4922 * enabled, but in practice it seems to have problems 4923 * when going from primary only to sprite only and vice 4924 * versa. 4925 */ 4926 hsw_enable_ips(intel_crtc); 4927 4928 /* 4929 * Gen2 reports pipe underruns whenever all planes are disabled. 4930 * So don't enable underrun reporting before at least some planes 4931 * are enabled. 4932 * FIXME: Need to fix the logic to work when we turn off all planes 4933 * but leave the pipe running. 4934 */ 4935 if (IS_GEN2(dev_priv)) 4936 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4937 4938 /* Underruns don't always raise interrupts, so check manually. */ 4939 intel_check_cpu_fifo_underruns(dev_priv); 4940 intel_check_pch_fifo_underruns(dev_priv); 4941 } 4942 4943 /* FIXME move all this to pre_plane_update() with proper state tracking */ 4944 static void 4945 intel_pre_disable_primary(struct drm_crtc *crtc) 4946 { 4947 struct drm_device *dev = crtc->dev; 4948 struct drm_i915_private *dev_priv = to_i915(dev); 4949 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4950 int pipe = intel_crtc->pipe; 4951 4952 /* 4953 * Gen2 reports pipe underruns whenever all planes are disabled. 4954 * So diasble underrun reporting before all the planes get disabled. 4955 * FIXME: Need to fix the logic to work when we turn off all planes 4956 * but leave the pipe running. 4957 */ 4958 if (IS_GEN2(dev_priv)) 4959 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 4960 4961 /* 4962 * FIXME IPS should be fine as long as one plane is 4963 * enabled, but in practice it seems to have problems 4964 * when going from primary only to sprite only and vice 4965 * versa. 4966 */ 4967 hsw_disable_ips(intel_crtc); 4968 } 4969 4970 /* FIXME get rid of this and use pre_plane_update */ 4971 static void 4972 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 4973 { 4974 struct drm_device *dev = crtc->dev; 4975 struct drm_i915_private *dev_priv = to_i915(dev); 4976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4977 int pipe = intel_crtc->pipe; 4978 4979 intel_pre_disable_primary(crtc); 4980 4981 /* 4982 * Vblank time updates from the shadow to live plane control register 4983 * are blocked if the memory self-refresh mode is active at that 4984 * moment. So to make sure the plane gets truly disabled, disable 4985 * first the self-refresh mode. The self-refresh enable bit in turn 4986 * will be checked/applied by the HW only at the next frame start 4987 * event which is after the vblank start event, so we need to have a 4988 * wait-for-vblank between disabling the plane and the pipe. 4989 */ 4990 if (HAS_GMCH_DISPLAY(dev_priv) && 4991 intel_set_memory_cxsr(dev_priv, false)) 4992 intel_wait_for_vblank(dev_priv, pipe); 4993 } 4994 4995 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 4996 { 4997 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4998 struct drm_atomic_state *old_state = old_crtc_state->base.state; 4999 struct intel_crtc_state *pipe_config = 5000 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state), 5001 crtc); 5002 struct drm_plane *primary = crtc->base.primary; 5003 struct drm_plane_state *old_pri_state = 5004 drm_atomic_get_existing_plane_state(old_state, primary); 5005 5006 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 5007 5008 if (pipe_config->update_wm_post && pipe_config->base.active) 5009 intel_update_watermarks(crtc); 5010 5011 if (old_pri_state) { 5012 struct intel_plane_state *primary_state = 5013 intel_atomic_get_new_plane_state(to_intel_atomic_state(old_state), 5014 to_intel_plane(primary)); 5015 struct intel_plane_state *old_primary_state = 5016 to_intel_plane_state(old_pri_state); 5017 5018 intel_fbc_post_update(crtc); 5019 5020 if (primary_state->base.visible && 5021 (needs_modeset(&pipe_config->base) || 5022 !old_primary_state->base.visible)) 5023 intel_post_enable_primary(&crtc->base); 5024 } 5025 } 5026 5027 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5028 struct intel_crtc_state *pipe_config) 5029 { 5030 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5031 struct drm_device *dev = crtc->base.dev; 5032 struct drm_i915_private *dev_priv = to_i915(dev); 5033 struct drm_atomic_state *old_state = old_crtc_state->base.state; 5034 struct drm_plane *primary = crtc->base.primary; 5035 struct drm_plane_state *old_pri_state = 5036 drm_atomic_get_existing_plane_state(old_state, primary); 5037 bool modeset = needs_modeset(&pipe_config->base); 5038 struct intel_atomic_state *old_intel_state = 5039 to_intel_atomic_state(old_state); 5040 5041 if (old_pri_state) { 5042 struct intel_plane_state *primary_state = 5043 intel_atomic_get_new_plane_state(old_intel_state, 5044 to_intel_plane(primary)); 5045 struct intel_plane_state *old_primary_state = 5046 to_intel_plane_state(old_pri_state); 5047 5048 intel_fbc_pre_update(crtc, pipe_config, primary_state); 5049 5050 if (old_primary_state->base.visible && 5051 (modeset || !primary_state->base.visible)) 5052 intel_pre_disable_primary(&crtc->base); 5053 } 5054 5055 /* 5056 * Vblank time updates from the shadow to live plane control register 5057 * are blocked if the memory self-refresh mode is active at that 5058 * moment. So to make sure the plane gets truly disabled, disable 5059 * first the self-refresh mode. The self-refresh enable bit in turn 5060 * will be checked/applied by the HW only at the next frame start 5061 * event which is after the vblank start event, so we need to have a 5062 * wait-for-vblank between disabling the plane and the pipe. 5063 */ 5064 if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active && 5065 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5066 intel_wait_for_vblank(dev_priv, crtc->pipe); 5067 5068 /* 5069 * IVB workaround: must disable low power watermarks for at least 5070 * one frame before enabling scaling. LP watermarks can be re-enabled 5071 * when scaling is disabled. 5072 * 5073 * WaCxSRDisabledForSpriteScaling:ivb 5074 */ 5075 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) 5076 intel_wait_for_vblank(dev_priv, crtc->pipe); 5077 5078 /* 5079 * If we're doing a modeset, we're done. No need to do any pre-vblank 5080 * watermark programming here. 5081 */ 5082 if (needs_modeset(&pipe_config->base)) 5083 return; 5084 5085 /* 5086 * For platforms that support atomic watermarks, program the 5087 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 5088 * will be the intermediate values that are safe for both pre- and 5089 * post- vblank; when vblank happens, the 'active' values will be set 5090 * to the final 'target' values and we'll do this again to get the 5091 * optimal watermarks. For gen9+ platforms, the values we program here 5092 * will be the final target values which will get automatically latched 5093 * at vblank time; no further programming will be necessary. 5094 * 5095 * If a platform hasn't been transitioned to atomic watermarks yet, 5096 * we'll continue to update watermarks the old way, if flags tell 5097 * us to. 5098 */ 5099 if (dev_priv->display.initial_watermarks != NULL) 5100 dev_priv->display.initial_watermarks(old_intel_state, 5101 pipe_config); 5102 else if (pipe_config->update_wm_pre) 5103 intel_update_watermarks(crtc); 5104 } 5105 5106 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 5107 { 5108 struct drm_device *dev = crtc->dev; 5109 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5110 struct drm_plane *p; 5111 int pipe = intel_crtc->pipe; 5112 5113 intel_crtc_dpms_overlay_disable(intel_crtc); 5114 5115 drm_for_each_plane_mask(p, dev, plane_mask) 5116 to_intel_plane(p)->disable_plane(to_intel_plane(p), intel_crtc); 5117 5118 /* 5119 * FIXME: Once we grow proper nuclear flip support out of this we need 5120 * to compute the mask of flip planes precisely. For the time being 5121 * consider this a flip to a NULL plane. 5122 */ 5123 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); 5124 } 5125 5126 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, 5127 struct intel_crtc_state *crtc_state, 5128 struct drm_atomic_state *old_state) 5129 { 5130 struct drm_connector_state *conn_state; 5131 struct drm_connector *conn; 5132 int i; 5133 5134 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5135 struct intel_encoder *encoder = 5136 to_intel_encoder(conn_state->best_encoder); 5137 5138 if (conn_state->crtc != crtc) 5139 continue; 5140 5141 if (encoder->pre_pll_enable) 5142 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 5143 } 5144 } 5145 5146 static void intel_encoders_pre_enable(struct drm_crtc *crtc, 5147 struct intel_crtc_state *crtc_state, 5148 struct drm_atomic_state *old_state) 5149 { 5150 struct drm_connector_state *conn_state; 5151 struct drm_connector *conn; 5152 int i; 5153 5154 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5155 struct intel_encoder *encoder = 5156 to_intel_encoder(conn_state->best_encoder); 5157 5158 if (conn_state->crtc != crtc) 5159 continue; 5160 5161 if (encoder->pre_enable) 5162 encoder->pre_enable(encoder, crtc_state, conn_state); 5163 } 5164 } 5165 5166 static void intel_encoders_enable(struct drm_crtc *crtc, 5167 struct intel_crtc_state *crtc_state, 5168 struct drm_atomic_state *old_state) 5169 { 5170 struct drm_connector_state *conn_state; 5171 struct drm_connector *conn; 5172 int i; 5173 5174 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5175 struct intel_encoder *encoder = 5176 to_intel_encoder(conn_state->best_encoder); 5177 5178 if (conn_state->crtc != crtc) 5179 continue; 5180 5181 encoder->enable(encoder, crtc_state, conn_state); 5182 intel_opregion_notify_encoder(encoder, true); 5183 } 5184 } 5185 5186 static void intel_encoders_disable(struct drm_crtc *crtc, 5187 struct intel_crtc_state *old_crtc_state, 5188 struct drm_atomic_state *old_state) 5189 { 5190 struct drm_connector_state *old_conn_state; 5191 struct drm_connector *conn; 5192 int i; 5193 5194 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5195 struct intel_encoder *encoder = 5196 to_intel_encoder(old_conn_state->best_encoder); 5197 5198 if (old_conn_state->crtc != crtc) 5199 continue; 5200 5201 intel_opregion_notify_encoder(encoder, false); 5202 encoder->disable(encoder, old_crtc_state, old_conn_state); 5203 } 5204 } 5205 5206 static void intel_encoders_post_disable(struct drm_crtc *crtc, 5207 struct intel_crtc_state *old_crtc_state, 5208 struct drm_atomic_state *old_state) 5209 { 5210 struct drm_connector_state *old_conn_state; 5211 struct drm_connector *conn; 5212 int i; 5213 5214 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5215 struct intel_encoder *encoder = 5216 to_intel_encoder(old_conn_state->best_encoder); 5217 5218 if (old_conn_state->crtc != crtc) 5219 continue; 5220 5221 if (encoder->post_disable) 5222 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 5223 } 5224 } 5225 5226 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, 5227 struct intel_crtc_state *old_crtc_state, 5228 struct drm_atomic_state *old_state) 5229 { 5230 struct drm_connector_state *old_conn_state; 5231 struct drm_connector *conn; 5232 int i; 5233 5234 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5235 struct intel_encoder *encoder = 5236 to_intel_encoder(old_conn_state->best_encoder); 5237 5238 if (old_conn_state->crtc != crtc) 5239 continue; 5240 5241 if (encoder->post_pll_disable) 5242 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 5243 } 5244 } 5245 5246 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 5247 struct drm_atomic_state *old_state) 5248 { 5249 struct drm_crtc *crtc = pipe_config->base.crtc; 5250 struct drm_device *dev = crtc->dev; 5251 struct drm_i915_private *dev_priv = to_i915(dev); 5252 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5253 int pipe = intel_crtc->pipe; 5254 struct intel_atomic_state *old_intel_state = 5255 to_intel_atomic_state(old_state); 5256 5257 if (WARN_ON(intel_crtc->active)) 5258 return; 5259 5260 /* 5261 * Sometimes spurious CPU pipe underruns happen during FDI 5262 * training, at least with VGA+HDMI cloning. Suppress them. 5263 * 5264 * On ILK we get an occasional spurious CPU pipe underruns 5265 * between eDP port A enable and vdd enable. Also PCH port 5266 * enable seems to result in the occasional CPU pipe underrun. 5267 * 5268 * Spurious PCH underruns also occur during PCH enabling. 5269 */ 5270 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) 5271 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5272 if (intel_crtc->config->has_pch_encoder) 5273 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5274 5275 if (intel_crtc->config->has_pch_encoder) 5276 intel_prepare_shared_dpll(intel_crtc); 5277 5278 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5279 intel_dp_set_m_n(intel_crtc, M1_N1); 5280 5281 intel_set_pipe_timings(intel_crtc); 5282 intel_set_pipe_src_size(intel_crtc); 5283 5284 if (intel_crtc->config->has_pch_encoder) { 5285 intel_cpu_transcoder_set_m_n(intel_crtc, 5286 &intel_crtc->config->fdi_m_n, NULL); 5287 } 5288 5289 ironlake_set_pipeconf(crtc); 5290 5291 intel_crtc->active = true; 5292 5293 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5294 5295 if (intel_crtc->config->has_pch_encoder) { 5296 /* Note: FDI PLL enabling _must_ be done before we enable the 5297 * cpu pipes, hence this is separate from all the other fdi/pch 5298 * enabling. */ 5299 ironlake_fdi_pll_enable(intel_crtc); 5300 } else { 5301 assert_fdi_tx_disabled(dev_priv, pipe); 5302 assert_fdi_rx_disabled(dev_priv, pipe); 5303 } 5304 5305 ironlake_pfit_enable(intel_crtc); 5306 5307 /* 5308 * On ILK+ LUT must be loaded before the pipe is running but with 5309 * clocks enabled 5310 */ 5311 intel_color_load_luts(&pipe_config->base); 5312 5313 if (dev_priv->display.initial_watermarks != NULL) 5314 dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); 5315 intel_enable_pipe(intel_crtc); 5316 5317 if (intel_crtc->config->has_pch_encoder) 5318 ironlake_pch_enable(pipe_config); 5319 5320 assert_vblank_disabled(crtc); 5321 drm_crtc_vblank_on(crtc); 5322 5323 intel_encoders_enable(crtc, pipe_config, old_state); 5324 5325 if (HAS_PCH_CPT(dev_priv)) 5326 cpt_verify_modeset(dev, intel_crtc->pipe); 5327 5328 /* Must wait for vblank to avoid spurious PCH FIFO underruns */ 5329 if (intel_crtc->config->has_pch_encoder) 5330 intel_wait_for_vblank(dev_priv, pipe); 5331 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5332 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 5333 } 5334 5335 /* IPS only exists on ULT machines and is tied to pipe A. */ 5336 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 5337 { 5338 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 5339 } 5340 5341 static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, 5342 enum i915_pipe pipe, bool apply) 5343 { 5344 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe)); 5345 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; 5346 5347 if (apply) 5348 val |= mask; 5349 else 5350 val &= ~mask; 5351 5352 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 5353 } 5354 5355 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 5356 struct drm_atomic_state *old_state) 5357 { 5358 struct drm_crtc *crtc = pipe_config->base.crtc; 5359 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5360 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5361 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 5362 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5363 struct intel_atomic_state *old_intel_state = 5364 to_intel_atomic_state(old_state); 5365 bool psl_clkgate_wa; 5366 5367 if (WARN_ON(intel_crtc->active)) 5368 return; 5369 5370 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5371 5372 if (intel_crtc->config->shared_dpll) 5373 intel_enable_shared_dpll(intel_crtc); 5374 5375 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5376 intel_dp_set_m_n(intel_crtc, M1_N1); 5377 5378 if (!transcoder_is_dsi(cpu_transcoder)) 5379 intel_set_pipe_timings(intel_crtc); 5380 5381 intel_set_pipe_src_size(intel_crtc); 5382 5383 if (cpu_transcoder != TRANSCODER_EDP && 5384 !transcoder_is_dsi(cpu_transcoder)) { 5385 I915_WRITE(PIPE_MULT(cpu_transcoder), 5386 intel_crtc->config->pixel_multiplier - 1); 5387 } 5388 5389 if (intel_crtc->config->has_pch_encoder) { 5390 intel_cpu_transcoder_set_m_n(intel_crtc, 5391 &intel_crtc->config->fdi_m_n, NULL); 5392 } 5393 5394 if (!transcoder_is_dsi(cpu_transcoder)) 5395 haswell_set_pipeconf(crtc); 5396 5397 haswell_set_pipemisc(crtc); 5398 5399 intel_color_set_csc(&pipe_config->base); 5400 5401 intel_crtc->active = true; 5402 5403 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5404 5405 if (!transcoder_is_dsi(cpu_transcoder)) 5406 intel_ddi_enable_pipe_clock(pipe_config); 5407 5408 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */ 5409 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) && 5410 intel_crtc->config->pch_pfit.enabled; 5411 if (psl_clkgate_wa) 5412 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true); 5413 5414 if (INTEL_GEN(dev_priv) >= 9) 5415 skylake_pfit_enable(intel_crtc); 5416 else 5417 ironlake_pfit_enable(intel_crtc); 5418 5419 /* 5420 * On ILK+ LUT must be loaded before the pipe is running but with 5421 * clocks enabled 5422 */ 5423 intel_color_load_luts(&pipe_config->base); 5424 5425 intel_ddi_set_pipe_settings(pipe_config); 5426 if (!transcoder_is_dsi(cpu_transcoder)) 5427 intel_ddi_enable_transcoder_func(pipe_config); 5428 5429 if (dev_priv->display.initial_watermarks != NULL) 5430 dev_priv->display.initial_watermarks(old_intel_state, pipe_config); 5431 5432 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5433 if (!transcoder_is_dsi(cpu_transcoder)) 5434 intel_enable_pipe(intel_crtc); 5435 5436 if (intel_crtc->config->has_pch_encoder) 5437 lpt_pch_enable(pipe_config); 5438 5439 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5440 intel_ddi_set_vc_payload_alloc(pipe_config, true); 5441 5442 assert_vblank_disabled(crtc); 5443 drm_crtc_vblank_on(crtc); 5444 5445 intel_encoders_enable(crtc, pipe_config, old_state); 5446 5447 if (psl_clkgate_wa) { 5448 intel_wait_for_vblank(dev_priv, pipe); 5449 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false); 5450 } 5451 5452 /* If we change the relative order between pipe/planes enabling, we need 5453 * to change the workaround. */ 5454 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 5455 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 5456 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 5457 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 5458 } 5459 } 5460 5461 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 5462 { 5463 struct drm_device *dev = crtc->base.dev; 5464 struct drm_i915_private *dev_priv = to_i915(dev); 5465 int pipe = crtc->pipe; 5466 5467 /* To avoid upsetting the power well on haswell only disable the pfit if 5468 * it's in use. The hw state code will make sure we get this right. */ 5469 if (force || crtc->config->pch_pfit.enabled) { 5470 I915_WRITE(PF_CTL(pipe), 0); 5471 I915_WRITE(PF_WIN_POS(pipe), 0); 5472 I915_WRITE(PF_WIN_SZ(pipe), 0); 5473 } 5474 } 5475 5476 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 5477 struct drm_atomic_state *old_state) 5478 { 5479 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5480 struct drm_device *dev = crtc->dev; 5481 struct drm_i915_private *dev_priv = to_i915(dev); 5482 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5483 int pipe = intel_crtc->pipe; 5484 5485 /* 5486 * Sometimes spurious CPU pipe underruns happen when the 5487 * pipe is already disabled, but FDI RX/TX is still enabled. 5488 * Happens at least with VGA+HDMI cloning. Suppress them. 5489 */ 5490 if (intel_crtc->config->has_pch_encoder) { 5491 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5492 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5493 } 5494 5495 intel_encoders_disable(crtc, old_crtc_state, old_state); 5496 5497 drm_crtc_vblank_off(crtc); 5498 assert_vblank_disabled(crtc); 5499 5500 intel_disable_pipe(intel_crtc); 5501 5502 ironlake_pfit_disable(intel_crtc, false); 5503 5504 if (intel_crtc->config->has_pch_encoder) 5505 ironlake_fdi_disable(crtc); 5506 5507 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5508 5509 if (intel_crtc->config->has_pch_encoder) { 5510 ironlake_disable_pch_transcoder(dev_priv, pipe); 5511 5512 if (HAS_PCH_CPT(dev_priv)) { 5513 i915_reg_t reg; 5514 u32 temp; 5515 5516 /* disable TRANS_DP_CTL */ 5517 reg = TRANS_DP_CTL(pipe); 5518 temp = I915_READ(reg); 5519 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 5520 TRANS_DP_PORT_SEL_MASK); 5521 temp |= TRANS_DP_PORT_SEL_NONE; 5522 I915_WRITE(reg, temp); 5523 5524 /* disable DPLL_SEL */ 5525 temp = I915_READ(PCH_DPLL_SEL); 5526 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 5527 I915_WRITE(PCH_DPLL_SEL, temp); 5528 } 5529 5530 ironlake_fdi_pll_disable(intel_crtc); 5531 } 5532 5533 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5534 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 5535 } 5536 5537 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 5538 struct drm_atomic_state *old_state) 5539 { 5540 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5541 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5542 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5543 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5544 5545 intel_encoders_disable(crtc, old_crtc_state, old_state); 5546 5547 drm_crtc_vblank_off(crtc); 5548 assert_vblank_disabled(crtc); 5549 5550 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5551 if (!transcoder_is_dsi(cpu_transcoder)) 5552 intel_disable_pipe(intel_crtc); 5553 5554 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5555 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); 5556 5557 if (!transcoder_is_dsi(cpu_transcoder)) 5558 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5559 5560 if (INTEL_GEN(dev_priv) >= 9) 5561 skylake_scaler_disable(intel_crtc); 5562 else 5563 ironlake_pfit_disable(intel_crtc, false); 5564 5565 if (!transcoder_is_dsi(cpu_transcoder)) 5566 intel_ddi_disable_pipe_clock(intel_crtc->config); 5567 5568 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5569 } 5570 5571 static void i9xx_pfit_enable(struct intel_crtc *crtc) 5572 { 5573 struct drm_device *dev = crtc->base.dev; 5574 struct drm_i915_private *dev_priv = to_i915(dev); 5575 struct intel_crtc_state *pipe_config = crtc->config; 5576 5577 if (!pipe_config->gmch_pfit.control) 5578 return; 5579 5580 /* 5581 * The panel fitter should only be adjusted whilst the pipe is disabled, 5582 * according to register description and PRM. 5583 */ 5584 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5585 assert_pipe_disabled(dev_priv, crtc->pipe); 5586 5587 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5588 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5589 5590 /* Border color in case we don't scale up to the full screen. Black by 5591 * default, change to something else for debugging. */ 5592 I915_WRITE(BCLRPAT(crtc->pipe), 0); 5593 } 5594 5595 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 5596 { 5597 switch (port) { 5598 case PORT_A: 5599 return POWER_DOMAIN_PORT_DDI_A_LANES; 5600 case PORT_B: 5601 return POWER_DOMAIN_PORT_DDI_B_LANES; 5602 case PORT_C: 5603 return POWER_DOMAIN_PORT_DDI_C_LANES; 5604 case PORT_D: 5605 return POWER_DOMAIN_PORT_DDI_D_LANES; 5606 case PORT_E: 5607 return POWER_DOMAIN_PORT_DDI_E_LANES; 5608 default: 5609 MISSING_CASE(port); 5610 return POWER_DOMAIN_PORT_OTHER; 5611 } 5612 } 5613 5614 static u64 get_crtc_power_domains(struct drm_crtc *crtc, 5615 struct intel_crtc_state *crtc_state) 5616 { 5617 struct drm_device *dev = crtc->dev; 5618 struct drm_i915_private *dev_priv = to_i915(dev); 5619 struct drm_encoder *encoder; 5620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5621 enum i915_pipe pipe = intel_crtc->pipe; 5622 u64 mask; 5623 enum transcoder transcoder = crtc_state->cpu_transcoder; 5624 5625 if (!crtc_state->base.active) 5626 return 0; 5627 5628 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5629 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5630 if (crtc_state->pch_pfit.enabled || 5631 crtc_state->pch_pfit.force_thru) 5632 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5633 5634 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { 5635 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 5636 5637 mask |= BIT_ULL(intel_encoder->power_domain); 5638 } 5639 5640 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 5641 mask |= BIT(POWER_DOMAIN_AUDIO); 5642 5643 if (crtc_state->shared_dpll) 5644 mask |= BIT_ULL(POWER_DOMAIN_PLLS); 5645 5646 return mask; 5647 } 5648 5649 static u64 5650 modeset_get_crtc_power_domains(struct drm_crtc *crtc, 5651 struct intel_crtc_state *crtc_state) 5652 { 5653 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5655 enum intel_display_power_domain domain; 5656 u64 domains, new_domains, old_domains; 5657 5658 old_domains = intel_crtc->enabled_power_domains; 5659 intel_crtc->enabled_power_domains = new_domains = 5660 get_crtc_power_domains(crtc, crtc_state); 5661 5662 domains = new_domains & ~old_domains; 5663 5664 for_each_power_domain(domain, domains) 5665 intel_display_power_get(dev_priv, domain); 5666 5667 return old_domains & ~new_domains; 5668 } 5669 5670 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 5671 u64 domains) 5672 { 5673 enum intel_display_power_domain domain; 5674 5675 for_each_power_domain(domain, domains) 5676 intel_display_power_put(dev_priv, domain); 5677 } 5678 5679 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 5680 struct drm_atomic_state *old_state) 5681 { 5682 struct intel_atomic_state *old_intel_state = 5683 to_intel_atomic_state(old_state); 5684 struct drm_crtc *crtc = pipe_config->base.crtc; 5685 struct drm_device *dev = crtc->dev; 5686 struct drm_i915_private *dev_priv = to_i915(dev); 5687 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5688 int pipe = intel_crtc->pipe; 5689 5690 if (WARN_ON(intel_crtc->active)) 5691 return; 5692 5693 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5694 intel_dp_set_m_n(intel_crtc, M1_N1); 5695 5696 intel_set_pipe_timings(intel_crtc); 5697 intel_set_pipe_src_size(intel_crtc); 5698 5699 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 5700 struct drm_i915_private *dev_priv = to_i915(dev); 5701 5702 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 5703 I915_WRITE(CHV_CANVAS(pipe), 0); 5704 } 5705 5706 i9xx_set_pipeconf(intel_crtc); 5707 5708 intel_crtc->active = true; 5709 5710 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5711 5712 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5713 5714 if (IS_CHERRYVIEW(dev_priv)) { 5715 chv_prepare_pll(intel_crtc, intel_crtc->config); 5716 chv_enable_pll(intel_crtc, intel_crtc->config); 5717 } else { 5718 vlv_prepare_pll(intel_crtc, intel_crtc->config); 5719 vlv_enable_pll(intel_crtc, intel_crtc->config); 5720 } 5721 5722 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5723 5724 i9xx_pfit_enable(intel_crtc); 5725 5726 intel_color_load_luts(&pipe_config->base); 5727 5728 dev_priv->display.initial_watermarks(old_intel_state, 5729 pipe_config); 5730 intel_enable_pipe(intel_crtc); 5731 5732 assert_vblank_disabled(crtc); 5733 drm_crtc_vblank_on(crtc); 5734 5735 intel_encoders_enable(crtc, pipe_config, old_state); 5736 } 5737 5738 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 5739 { 5740 struct drm_device *dev = crtc->base.dev; 5741 struct drm_i915_private *dev_priv = to_i915(dev); 5742 5743 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 5744 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 5745 } 5746 5747 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 5748 struct drm_atomic_state *old_state) 5749 { 5750 struct intel_atomic_state *old_intel_state = 5751 to_intel_atomic_state(old_state); 5752 struct drm_crtc *crtc = pipe_config->base.crtc; 5753 struct drm_device *dev = crtc->dev; 5754 struct drm_i915_private *dev_priv = to_i915(dev); 5755 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5756 enum i915_pipe pipe = intel_crtc->pipe; 5757 5758 if (WARN_ON(intel_crtc->active)) 5759 return; 5760 5761 i9xx_set_pll_dividers(intel_crtc); 5762 5763 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5764 intel_dp_set_m_n(intel_crtc, M1_N1); 5765 5766 intel_set_pipe_timings(intel_crtc); 5767 intel_set_pipe_src_size(intel_crtc); 5768 5769 i9xx_set_pipeconf(intel_crtc); 5770 5771 intel_crtc->active = true; 5772 5773 if (!IS_GEN2(dev_priv)) 5774 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5775 5776 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5777 5778 i9xx_enable_pll(intel_crtc, pipe_config); 5779 5780 i9xx_pfit_enable(intel_crtc); 5781 5782 intel_color_load_luts(&pipe_config->base); 5783 5784 if (dev_priv->display.initial_watermarks != NULL) 5785 dev_priv->display.initial_watermarks(old_intel_state, 5786 intel_crtc->config); 5787 else 5788 intel_update_watermarks(intel_crtc); 5789 intel_enable_pipe(intel_crtc); 5790 5791 assert_vblank_disabled(crtc); 5792 drm_crtc_vblank_on(crtc); 5793 5794 intel_encoders_enable(crtc, pipe_config, old_state); 5795 } 5796 5797 static void i9xx_pfit_disable(struct intel_crtc *crtc) 5798 { 5799 struct drm_device *dev = crtc->base.dev; 5800 struct drm_i915_private *dev_priv = to_i915(dev); 5801 5802 if (!crtc->config->gmch_pfit.control) 5803 return; 5804 5805 assert_pipe_disabled(dev_priv, crtc->pipe); 5806 5807 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 5808 I915_READ(PFIT_CONTROL)); 5809 I915_WRITE(PFIT_CONTROL, 0); 5810 } 5811 5812 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 5813 struct drm_atomic_state *old_state) 5814 { 5815 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5816 struct drm_device *dev = crtc->dev; 5817 struct drm_i915_private *dev_priv = to_i915(dev); 5818 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5819 int pipe = intel_crtc->pipe; 5820 5821 /* 5822 * On gen2 planes are double buffered but the pipe isn't, so we must 5823 * wait for planes to fully turn off before disabling the pipe. 5824 */ 5825 if (IS_GEN2(dev_priv)) 5826 intel_wait_for_vblank(dev_priv, pipe); 5827 5828 intel_encoders_disable(crtc, old_crtc_state, old_state); 5829 5830 drm_crtc_vblank_off(crtc); 5831 assert_vblank_disabled(crtc); 5832 5833 intel_disable_pipe(intel_crtc); 5834 5835 i9xx_pfit_disable(intel_crtc); 5836 5837 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5838 5839 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { 5840 if (IS_CHERRYVIEW(dev_priv)) 5841 chv_disable_pll(dev_priv, pipe); 5842 else if (IS_VALLEYVIEW(dev_priv)) 5843 vlv_disable_pll(dev_priv, pipe); 5844 else 5845 i9xx_disable_pll(intel_crtc); 5846 } 5847 5848 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); 5849 5850 if (!IS_GEN2(dev_priv)) 5851 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5852 5853 if (!dev_priv->display.initial_watermarks) 5854 intel_update_watermarks(intel_crtc); 5855 5856 /* clock the pipe down to 640x480@60 to potentially save power */ 5857 if (IS_I830(dev_priv)) 5858 i830_enable_pipe(dev_priv, pipe); 5859 } 5860 5861 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 5862 struct drm_modeset_acquire_ctx *ctx) 5863 { 5864 struct intel_encoder *encoder; 5865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5866 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5867 enum intel_display_power_domain domain; 5868 struct intel_plane *plane; 5869 u64 domains; 5870 struct drm_atomic_state *state; 5871 struct intel_crtc_state *crtc_state; 5872 int ret; 5873 5874 if (!intel_crtc->active) 5875 return; 5876 5877 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) { 5878 const struct intel_plane_state *plane_state = 5879 to_intel_plane_state(plane->base.state); 5880 5881 if (plane_state->base.visible) 5882 intel_plane_disable_noatomic(intel_crtc, plane); 5883 } 5884 5885 state = drm_atomic_state_alloc(crtc->dev); 5886 if (!state) { 5887 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 5888 crtc->base.id, crtc->name); 5889 return; 5890 } 5891 5892 state->acquire_ctx = ctx; 5893 5894 /* Everything's already locked, -EDEADLK can't happen. */ 5895 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 5896 ret = drm_atomic_add_affected_connectors(state, crtc); 5897 5898 WARN_ON(IS_ERR(crtc_state) || ret); 5899 5900 dev_priv->display.crtc_disable(crtc_state, state); 5901 5902 drm_atomic_state_put(state); 5903 5904 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n", 5905 crtc->base.id, crtc->name); 5906 5907 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 5908 crtc->state->active = false; 5909 intel_crtc->active = false; 5910 crtc->enabled = false; 5911 crtc->state->connector_mask = 0; 5912 crtc->state->encoder_mask = 0; 5913 5914 for_each_encoder_on_crtc(crtc->dev, crtc, encoder) 5915 encoder->base.crtc = NULL; 5916 5917 intel_fbc_disable(intel_crtc); 5918 intel_update_watermarks(intel_crtc); 5919 intel_disable_shared_dpll(intel_crtc); 5920 5921 domains = intel_crtc->enabled_power_domains; 5922 for_each_power_domain(domain, domains) 5923 intel_display_power_put(dev_priv, domain); 5924 intel_crtc->enabled_power_domains = 0; 5925 5926 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); 5927 dev_priv->min_cdclk[intel_crtc->pipe] = 0; 5928 } 5929 5930 /* 5931 * turn all crtc's off, but do not adjust state 5932 * This has to be paired with a call to intel_modeset_setup_hw_state. 5933 */ 5934 int intel_display_suspend(struct drm_device *dev) 5935 { 5936 struct drm_i915_private *dev_priv = to_i915(dev); 5937 struct drm_atomic_state *state; 5938 int ret; 5939 5940 state = drm_atomic_helper_suspend(dev); 5941 ret = PTR_ERR_OR_ZERO(state); 5942 if (ret) 5943 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 5944 else 5945 dev_priv->modeset_restore_state = state; 5946 return ret; 5947 } 5948 5949 void intel_encoder_destroy(struct drm_encoder *encoder) 5950 { 5951 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 5952 5953 drm_encoder_cleanup(encoder); 5954 kfree(intel_encoder); 5955 } 5956 5957 /* Cross check the actual hw state with our own modeset state tracking (and it's 5958 * internal consistency). */ 5959 static void intel_connector_verify_state(struct drm_crtc_state *crtc_state, 5960 struct drm_connector_state *conn_state) 5961 { 5962 struct intel_connector *connector = to_intel_connector(conn_state->connector); 5963 5964 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 5965 connector->base.base.id, 5966 connector->base.name); 5967 5968 if (connector->get_hw_state(connector)) { 5969 struct intel_encoder *encoder = connector->encoder; 5970 5971 I915_STATE_WARN(!crtc_state, 5972 "connector enabled without attached crtc\n"); 5973 5974 if (!crtc_state) 5975 return; 5976 5977 I915_STATE_WARN(!crtc_state->active, 5978 "connector is active, but attached crtc isn't\n"); 5979 5980 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 5981 return; 5982 5983 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 5984 "atomic encoder doesn't match attached encoder\n"); 5985 5986 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 5987 "attached encoder crtc differs from connector crtc\n"); 5988 } else { 5989 I915_STATE_WARN(crtc_state && crtc_state->active, 5990 "attached crtc is active, but connector isn't\n"); 5991 I915_STATE_WARN(!crtc_state && conn_state->best_encoder, 5992 "best encoder set without crtc!\n"); 5993 } 5994 } 5995 5996 int intel_connector_init(struct intel_connector *connector) 5997 { 5998 struct intel_digital_connector_state *conn_state; 5999 6000 /* 6001 * Allocate enough memory to hold intel_digital_connector_state, 6002 * This might be a few bytes too many, but for connectors that don't 6003 * need it we'll free the state and allocate a smaller one on the first 6004 * succesful commit anyway. 6005 */ 6006 conn_state = kzalloc(sizeof(*conn_state), GFP_KERNEL); 6007 if (!conn_state) 6008 return -ENOMEM; 6009 6010 __drm_atomic_helper_connector_reset(&connector->base, 6011 &conn_state->base); 6012 6013 return 0; 6014 } 6015 6016 struct intel_connector *intel_connector_alloc(void) 6017 { 6018 struct intel_connector *connector; 6019 6020 connector = kzalloc(sizeof *connector, GFP_KERNEL); 6021 if (!connector) 6022 return NULL; 6023 6024 if (intel_connector_init(connector) < 0) { 6025 kfree(connector); 6026 return NULL; 6027 } 6028 6029 return connector; 6030 } 6031 6032 /* 6033 * Free the bits allocated by intel_connector_alloc. 6034 * This should only be used after intel_connector_alloc has returned 6035 * successfully, and before drm_connector_init returns successfully. 6036 * Otherwise the destroy callbacks for the connector and the state should 6037 * take care of proper cleanup/free 6038 */ 6039 void intel_connector_free(struct intel_connector *connector) 6040 { 6041 kfree(to_intel_digital_connector_state(connector->base.state)); 6042 kfree(connector); 6043 } 6044 6045 /* Simple connector->get_hw_state implementation for encoders that support only 6046 * one connector and no cloning and hence the encoder state determines the state 6047 * of the connector. */ 6048 bool intel_connector_get_hw_state(struct intel_connector *connector) 6049 { 6050 enum i915_pipe pipe = 0; 6051 struct intel_encoder *encoder = connector->encoder; 6052 6053 return encoder->get_hw_state(encoder, &pipe); 6054 } 6055 6056 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6057 { 6058 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6059 return crtc_state->fdi_lanes; 6060 6061 return 0; 6062 } 6063 6064 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, 6065 struct intel_crtc_state *pipe_config) 6066 { 6067 struct drm_i915_private *dev_priv = to_i915(dev); 6068 struct drm_atomic_state *state = pipe_config->base.state; 6069 struct intel_crtc *other_crtc; 6070 struct intel_crtc_state *other_crtc_state; 6071 6072 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 6073 pipe_name(pipe), pipe_config->fdi_lanes); 6074 if (pipe_config->fdi_lanes > 4) { 6075 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 6076 pipe_name(pipe), pipe_config->fdi_lanes); 6077 return -EINVAL; 6078 } 6079 6080 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 6081 if (pipe_config->fdi_lanes > 2) { 6082 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 6083 pipe_config->fdi_lanes); 6084 return -EINVAL; 6085 } else { 6086 return 0; 6087 } 6088 } 6089 6090 if (INTEL_INFO(dev_priv)->num_pipes == 2) 6091 return 0; 6092 6093 /* Ivybridge 3 pipe is really complicated */ 6094 switch (pipe) { 6095 case PIPE_A: 6096 return 0; 6097 case PIPE_B: 6098 if (pipe_config->fdi_lanes <= 2) 6099 return 0; 6100 6101 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 6102 other_crtc_state = 6103 intel_atomic_get_crtc_state(state, other_crtc); 6104 if (IS_ERR(other_crtc_state)) 6105 return PTR_ERR(other_crtc_state); 6106 6107 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 6108 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 6109 pipe_name(pipe), pipe_config->fdi_lanes); 6110 return -EINVAL; 6111 } 6112 return 0; 6113 case PIPE_C: 6114 if (pipe_config->fdi_lanes > 2) { 6115 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6116 pipe_name(pipe), pipe_config->fdi_lanes); 6117 return -EINVAL; 6118 } 6119 6120 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 6121 other_crtc_state = 6122 intel_atomic_get_crtc_state(state, other_crtc); 6123 if (IS_ERR(other_crtc_state)) 6124 return PTR_ERR(other_crtc_state); 6125 6126 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6127 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6128 return -EINVAL; 6129 } 6130 return 0; 6131 default: 6132 BUG(); 6133 } 6134 } 6135 6136 #define RETRY 1 6137 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6138 struct intel_crtc_state *pipe_config) 6139 { 6140 struct drm_device *dev = intel_crtc->base.dev; 6141 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6142 int lane, link_bw, fdi_dotclock, ret; 6143 bool needs_recompute = false; 6144 6145 retry: 6146 /* FDI is a binary signal running at ~2.7GHz, encoding 6147 * each output octet as 10 bits. The actual frequency 6148 * is stored as a divider into a 100MHz clock, and the 6149 * mode pixel clock is stored in units of 1KHz. 6150 * Hence the bw of each lane in terms of the mode signal 6151 * is: 6152 */ 6153 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 6154 6155 fdi_dotclock = adjusted_mode->crtc_clock; 6156 6157 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 6158 pipe_config->pipe_bpp); 6159 6160 pipe_config->fdi_lanes = lane; 6161 6162 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6163 link_bw, &pipe_config->fdi_m_n, false); 6164 6165 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6166 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6167 pipe_config->pipe_bpp -= 2*3; 6168 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6169 pipe_config->pipe_bpp); 6170 needs_recompute = true; 6171 pipe_config->bw_constrained = true; 6172 6173 goto retry; 6174 } 6175 6176 if (needs_recompute) 6177 return RETRY; 6178 6179 return ret; 6180 } 6181 6182 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, 6183 struct intel_crtc_state *pipe_config) 6184 { 6185 if (pipe_config->ips_force_disable) 6186 return false; 6187 6188 if (pipe_config->pipe_bpp > 24) 6189 return false; 6190 6191 /* HSW can handle pixel rate up to cdclk? */ 6192 if (IS_HASWELL(dev_priv)) 6193 return true; 6194 6195 /* 6196 * We compare against max which means we must take 6197 * the increased cdclk requirement into account when 6198 * calculating the new cdclk. 6199 * 6200 * Should measure whether using a lower cdclk w/o IPS 6201 */ 6202 return pipe_config->pixel_rate <= 6203 dev_priv->max_cdclk_freq * 95 / 100; 6204 } 6205 6206 static void hsw_compute_ips_config(struct intel_crtc *crtc, 6207 struct intel_crtc_state *pipe_config) 6208 { 6209 struct drm_device *dev = crtc->base.dev; 6210 struct drm_i915_private *dev_priv = to_i915(dev); 6211 6212 pipe_config->ips_enabled = i915_modparams.enable_ips && 6213 hsw_crtc_supports_ips(crtc) && 6214 pipe_config_supports_ips(dev_priv, pipe_config); 6215 } 6216 6217 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 6218 { 6219 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6220 6221 /* GDG double wide on either pipe, otherwise pipe A only */ 6222 return INTEL_INFO(dev_priv)->gen < 4 && 6223 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6224 } 6225 6226 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 6227 { 6228 uint32_t pixel_rate; 6229 6230 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 6231 6232 /* 6233 * We only use IF-ID interlacing. If we ever use 6234 * PF-ID we'll need to adjust the pixel_rate here. 6235 */ 6236 6237 if (pipe_config->pch_pfit.enabled) { 6238 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 6239 uint32_t pfit_size = pipe_config->pch_pfit.size; 6240 6241 pipe_w = pipe_config->pipe_src_w; 6242 pipe_h = pipe_config->pipe_src_h; 6243 6244 pfit_w = (pfit_size >> 16) & 0xFFFF; 6245 pfit_h = pfit_size & 0xFFFF; 6246 if (pipe_w < pfit_w) 6247 pipe_w = pfit_w; 6248 if (pipe_h < pfit_h) 6249 pipe_h = pfit_h; 6250 6251 if (WARN_ON(!pfit_w || !pfit_h)) 6252 return pixel_rate; 6253 6254 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 6255 pfit_w * pfit_h); 6256 } 6257 6258 return pixel_rate; 6259 } 6260 6261 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 6262 { 6263 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 6264 6265 if (HAS_GMCH_DISPLAY(dev_priv)) 6266 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 6267 crtc_state->pixel_rate = 6268 crtc_state->base.adjusted_mode.crtc_clock; 6269 else 6270 crtc_state->pixel_rate = 6271 ilk_pipe_pixel_rate(crtc_state); 6272 } 6273 6274 static int intel_crtc_compute_config(struct intel_crtc *crtc, 6275 struct intel_crtc_state *pipe_config) 6276 { 6277 struct drm_device *dev = crtc->base.dev; 6278 struct drm_i915_private *dev_priv = to_i915(dev); 6279 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6280 int clock_limit = dev_priv->max_dotclk_freq; 6281 6282 if (INTEL_GEN(dev_priv) < 4) { 6283 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6284 6285 /* 6286 * Enable double wide mode when the dot clock 6287 * is > 90% of the (display) core speed. 6288 */ 6289 if (intel_crtc_supports_double_wide(crtc) && 6290 adjusted_mode->crtc_clock > clock_limit) { 6291 clock_limit = dev_priv->max_dotclk_freq; 6292 pipe_config->double_wide = true; 6293 } 6294 } 6295 6296 if (adjusted_mode->crtc_clock > clock_limit) { 6297 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6298 adjusted_mode->crtc_clock, clock_limit, 6299 yesno(pipe_config->double_wide)); 6300 return -EINVAL; 6301 } 6302 6303 if (pipe_config->ycbcr420 && pipe_config->base.ctm) { 6304 /* 6305 * There is only one pipe CSC unit per pipe, and we need that 6306 * for output conversion from RGB->YCBCR. So if CTM is already 6307 * applied we can't support YCBCR420 output. 6308 */ 6309 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n"); 6310 return -EINVAL; 6311 } 6312 6313 /* 6314 * Pipe horizontal size must be even in: 6315 * - DVO ganged mode 6316 * - LVDS dual channel mode 6317 * - Double wide pipe 6318 */ 6319 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6320 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6321 pipe_config->pipe_src_w &= ~1; 6322 6323 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6324 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6325 */ 6326 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 6327 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6328 return -EINVAL; 6329 6330 intel_crtc_compute_pixel_rate(pipe_config); 6331 6332 if (HAS_IPS(dev_priv)) 6333 hsw_compute_ips_config(crtc, pipe_config); 6334 6335 if (pipe_config->has_pch_encoder) 6336 return ironlake_fdi_compute_config(crtc, pipe_config); 6337 6338 return 0; 6339 } 6340 6341 static void 6342 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 6343 { 6344 while (*num > DATA_LINK_M_N_MASK || 6345 *den > DATA_LINK_M_N_MASK) { 6346 *num >>= 1; 6347 *den >>= 1; 6348 } 6349 } 6350 6351 static void compute_m_n(unsigned int m, unsigned int n, 6352 uint32_t *ret_m, uint32_t *ret_n, 6353 bool reduce_m_n) 6354 { 6355 /* 6356 * Reduce M/N as much as possible without loss in precision. Several DP 6357 * dongles in particular seem to be fussy about too large *link* M/N 6358 * values. The passed in values are more likely to have the least 6359 * significant bits zero than M after rounding below, so do this first. 6360 */ 6361 if (reduce_m_n) { 6362 while ((m & 1) == 0 && (n & 1) == 0) { 6363 m >>= 1; 6364 n >>= 1; 6365 } 6366 } 6367 6368 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 6369 *ret_m = div_u64((uint64_t) m * *ret_n, n); 6370 intel_reduce_m_n_ratio(ret_m, ret_n); 6371 } 6372 6373 void 6374 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6375 int pixel_clock, int link_clock, 6376 struct intel_link_m_n *m_n, 6377 bool reduce_m_n) 6378 { 6379 m_n->tu = 64; 6380 6381 compute_m_n(bits_per_pixel * pixel_clock, 6382 link_clock * nlanes * 8, 6383 &m_n->gmch_m, &m_n->gmch_n, 6384 reduce_m_n); 6385 6386 compute_m_n(pixel_clock, link_clock, 6387 &m_n->link_m, &m_n->link_n, 6388 reduce_m_n); 6389 } 6390 6391 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 6392 { 6393 if (i915_modparams.panel_use_ssc >= 0) 6394 return i915_modparams.panel_use_ssc != 0; 6395 return dev_priv->vbt.lvds_use_ssc 6396 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 6397 } 6398 6399 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 6400 { 6401 return (1 << dpll->n) << 16 | dpll->m2; 6402 } 6403 6404 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 6405 { 6406 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 6407 } 6408 6409 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 6410 struct intel_crtc_state *crtc_state, 6411 struct dpll *reduced_clock) 6412 { 6413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6414 u32 fp, fp2 = 0; 6415 6416 if (IS_PINEVIEW(dev_priv)) { 6417 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 6418 if (reduced_clock) 6419 fp2 = pnv_dpll_compute_fp(reduced_clock); 6420 } else { 6421 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 6422 if (reduced_clock) 6423 fp2 = i9xx_dpll_compute_fp(reduced_clock); 6424 } 6425 6426 crtc_state->dpll_hw_state.fp0 = fp; 6427 6428 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6429 reduced_clock) { 6430 crtc_state->dpll_hw_state.fp1 = fp2; 6431 } else { 6432 crtc_state->dpll_hw_state.fp1 = fp; 6433 } 6434 } 6435 6436 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 6437 pipe) 6438 { 6439 u32 reg_val; 6440 6441 /* 6442 * PLLB opamp always calibrates to max value of 0x3f, force enable it 6443 * and set it to a reasonable value instead. 6444 */ 6445 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6446 reg_val &= 0xffffff00; 6447 reg_val |= 0x00000030; 6448 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6449 6450 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6451 reg_val &= 0x00ffffff; 6452 reg_val |= 0x8c000000; 6453 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6454 6455 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6456 reg_val &= 0xffffff00; 6457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6458 6459 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6460 reg_val &= 0x00ffffff; 6461 reg_val |= 0xb0000000; 6462 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6463 } 6464 6465 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 6466 struct intel_link_m_n *m_n) 6467 { 6468 struct drm_device *dev = crtc->base.dev; 6469 struct drm_i915_private *dev_priv = to_i915(dev); 6470 int pipe = crtc->pipe; 6471 6472 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6473 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 6474 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 6475 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 6476 } 6477 6478 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 6479 struct intel_link_m_n *m_n, 6480 struct intel_link_m_n *m2_n2) 6481 { 6482 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6483 int pipe = crtc->pipe; 6484 enum transcoder transcoder = crtc->config->cpu_transcoder; 6485 6486 if (INTEL_GEN(dev_priv) >= 5) { 6487 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 6488 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 6489 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 6490 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 6491 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 6492 * for gen < 8) and if DRRS is supported (to make sure the 6493 * registers are not unnecessarily accessed). 6494 */ 6495 if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || 6496 INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { 6497 I915_WRITE(PIPE_DATA_M2(transcoder), 6498 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 6499 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 6500 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 6501 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 6502 } 6503 } else { 6504 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6505 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 6506 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 6507 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 6508 } 6509 } 6510 6511 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 6512 { 6513 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 6514 6515 if (m_n == M1_N1) { 6516 dp_m_n = &crtc->config->dp_m_n; 6517 dp_m2_n2 = &crtc->config->dp_m2_n2; 6518 } else if (m_n == M2_N2) { 6519 6520 /* 6521 * M2_N2 registers are not supported. Hence m2_n2 divider value 6522 * needs to be programmed into M1_N1. 6523 */ 6524 dp_m_n = &crtc->config->dp_m2_n2; 6525 } else { 6526 DRM_ERROR("Unsupported divider value\n"); 6527 return; 6528 } 6529 6530 if (crtc->config->has_pch_encoder) 6531 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6532 else 6533 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 6534 } 6535 6536 static void vlv_compute_dpll(struct intel_crtc *crtc, 6537 struct intel_crtc_state *pipe_config) 6538 { 6539 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 6540 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6541 if (crtc->pipe != PIPE_A) 6542 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6543 6544 /* DPLL not used with DSI, but still need the rest set up */ 6545 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6546 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 6547 DPLL_EXT_BUFFER_ENABLE_VLV; 6548 6549 pipe_config->dpll_hw_state.dpll_md = 6550 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6551 } 6552 6553 static void chv_compute_dpll(struct intel_crtc *crtc, 6554 struct intel_crtc_state *pipe_config) 6555 { 6556 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 6557 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6558 if (crtc->pipe != PIPE_A) 6559 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6560 6561 /* DPLL not used with DSI, but still need the rest set up */ 6562 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6563 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 6564 6565 pipe_config->dpll_hw_state.dpll_md = 6566 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6567 } 6568 6569 static void vlv_prepare_pll(struct intel_crtc *crtc, 6570 const struct intel_crtc_state *pipe_config) 6571 { 6572 struct drm_device *dev = crtc->base.dev; 6573 struct drm_i915_private *dev_priv = to_i915(dev); 6574 enum i915_pipe pipe = crtc->pipe; 6575 u32 mdiv; 6576 u32 bestn, bestm1, bestm2, bestp1, bestp2; 6577 u32 coreclk, reg_val; 6578 6579 /* Enable Refclk */ 6580 I915_WRITE(DPLL(pipe), 6581 pipe_config->dpll_hw_state.dpll & 6582 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 6583 6584 /* No need to actually set up the DPLL with DSI */ 6585 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6586 return; 6587 6588 mutex_lock(&dev_priv->sb_lock); 6589 6590 bestn = pipe_config->dpll.n; 6591 bestm1 = pipe_config->dpll.m1; 6592 bestm2 = pipe_config->dpll.m2; 6593 bestp1 = pipe_config->dpll.p1; 6594 bestp2 = pipe_config->dpll.p2; 6595 6596 /* See eDP HDMI DPIO driver vbios notes doc */ 6597 6598 /* PLL B needs special handling */ 6599 if (pipe == PIPE_B) 6600 vlv_pllb_recal_opamp(dev_priv, pipe); 6601 6602 /* Set up Tx target for periodic Rcomp update */ 6603 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 6604 6605 /* Disable target IRef on PLL */ 6606 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 6607 reg_val &= 0x00ffffff; 6608 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 6609 6610 /* Disable fast lock */ 6611 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 6612 6613 /* Set idtafcrecal before PLL is enabled */ 6614 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 6615 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 6616 mdiv |= ((bestn << DPIO_N_SHIFT)); 6617 mdiv |= (1 << DPIO_K_SHIFT); 6618 6619 /* 6620 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 6621 * but we don't support that). 6622 * Note: don't use the DAC post divider as it seems unstable. 6623 */ 6624 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 6625 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6626 6627 mdiv |= DPIO_ENABLE_CALIBRATION; 6628 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6629 6630 /* Set HBR and RBR LPF coefficients */ 6631 if (pipe_config->port_clock == 162000 || 6632 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 6633 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 6634 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6635 0x009f0003); 6636 else 6637 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6638 0x00d0000f); 6639 6640 if (intel_crtc_has_dp_encoder(pipe_config)) { 6641 /* Use SSC source */ 6642 if (pipe == PIPE_A) 6643 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6644 0x0df40000); 6645 else 6646 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6647 0x0df70000); 6648 } else { /* HDMI or VGA */ 6649 /* Use bend source */ 6650 if (pipe == PIPE_A) 6651 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6652 0x0df70000); 6653 else 6654 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6655 0x0df40000); 6656 } 6657 6658 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 6659 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 6660 if (intel_crtc_has_dp_encoder(crtc->config)) 6661 coreclk |= 0x01000000; 6662 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 6663 6664 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 6665 mutex_unlock(&dev_priv->sb_lock); 6666 } 6667 6668 static void chv_prepare_pll(struct intel_crtc *crtc, 6669 const struct intel_crtc_state *pipe_config) 6670 { 6671 struct drm_device *dev = crtc->base.dev; 6672 struct drm_i915_private *dev_priv = to_i915(dev); 6673 enum i915_pipe pipe = crtc->pipe; 6674 enum dpio_channel port = vlv_pipe_to_channel(pipe); 6675 u32 loopfilter, tribuf_calcntr; 6676 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 6677 u32 dpio_val; 6678 int vco; 6679 6680 /* Enable Refclk and SSC */ 6681 I915_WRITE(DPLL(pipe), 6682 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 6683 6684 /* No need to actually set up the DPLL with DSI */ 6685 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6686 return; 6687 6688 bestn = pipe_config->dpll.n; 6689 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 6690 bestm1 = pipe_config->dpll.m1; 6691 bestm2 = pipe_config->dpll.m2 >> 22; 6692 bestp1 = pipe_config->dpll.p1; 6693 bestp2 = pipe_config->dpll.p2; 6694 vco = pipe_config->dpll.vco; 6695 dpio_val = 0; 6696 loopfilter = 0; 6697 6698 mutex_lock(&dev_priv->sb_lock); 6699 6700 /* p1 and p2 divider */ 6701 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 6702 5 << DPIO_CHV_S1_DIV_SHIFT | 6703 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 6704 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 6705 1 << DPIO_CHV_K_DIV_SHIFT); 6706 6707 /* Feedback post-divider - m2 */ 6708 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 6709 6710 /* Feedback refclk divider - n and m1 */ 6711 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 6712 DPIO_CHV_M1_DIV_BY_2 | 6713 1 << DPIO_CHV_N_DIV_SHIFT); 6714 6715 /* M2 fraction division */ 6716 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 6717 6718 /* M2 fraction division enable */ 6719 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 6720 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 6721 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 6722 if (bestm2_frac) 6723 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 6724 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 6725 6726 /* Program digital lock detect threshold */ 6727 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 6728 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 6729 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 6730 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 6731 if (!bestm2_frac) 6732 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 6733 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 6734 6735 /* Loop filter */ 6736 if (vco == 5400000) { 6737 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 6738 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 6739 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 6740 tribuf_calcntr = 0x9; 6741 } else if (vco <= 6200000) { 6742 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 6743 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 6744 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6745 tribuf_calcntr = 0x9; 6746 } else if (vco <= 6480000) { 6747 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6748 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6749 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6750 tribuf_calcntr = 0x8; 6751 } else { 6752 /* Not supported. Apply the same limits as in the max case */ 6753 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6754 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6755 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6756 tribuf_calcntr = 0; 6757 } 6758 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 6759 6760 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 6761 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 6762 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 6763 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 6764 6765 /* AFC Recal */ 6766 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 6767 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 6768 DPIO_AFC_RECAL); 6769 6770 mutex_unlock(&dev_priv->sb_lock); 6771 } 6772 6773 /** 6774 * vlv_force_pll_on - forcibly enable just the PLL 6775 * @dev_priv: i915 private structure 6776 * @pipe: pipe PLL to enable 6777 * @dpll: PLL configuration 6778 * 6779 * Enable the PLL for @pipe using the supplied @dpll config. To be used 6780 * in cases where we need the PLL enabled even when @pipe is not going to 6781 * be enabled. 6782 */ 6783 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 6784 const struct dpll *dpll) 6785 { 6786 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 6787 struct intel_crtc_state *pipe_config; 6788 6789 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 6790 if (!pipe_config) 6791 return -ENOMEM; 6792 6793 pipe_config->base.crtc = &crtc->base; 6794 pipe_config->pixel_multiplier = 1; 6795 pipe_config->dpll = *dpll; 6796 6797 if (IS_CHERRYVIEW(dev_priv)) { 6798 chv_compute_dpll(crtc, pipe_config); 6799 chv_prepare_pll(crtc, pipe_config); 6800 chv_enable_pll(crtc, pipe_config); 6801 } else { 6802 vlv_compute_dpll(crtc, pipe_config); 6803 vlv_prepare_pll(crtc, pipe_config); 6804 vlv_enable_pll(crtc, pipe_config); 6805 } 6806 6807 kfree(pipe_config); 6808 6809 return 0; 6810 } 6811 6812 /** 6813 * vlv_force_pll_off - forcibly disable just the PLL 6814 * @dev_priv: i915 private structure 6815 * @pipe: pipe PLL to disable 6816 * 6817 * Disable the PLL for @pipe. To be used in cases where we need 6818 * the PLL enabled even when @pipe is not going to be enabled. 6819 */ 6820 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 6821 { 6822 if (IS_CHERRYVIEW(dev_priv)) 6823 chv_disable_pll(dev_priv, pipe); 6824 else 6825 vlv_disable_pll(dev_priv, pipe); 6826 } 6827 6828 static void i9xx_compute_dpll(struct intel_crtc *crtc, 6829 struct intel_crtc_state *crtc_state, 6830 struct dpll *reduced_clock) 6831 { 6832 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6833 u32 dpll; 6834 struct dpll *clock = &crtc_state->dpll; 6835 6836 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6837 6838 dpll = DPLL_VGA_MODE_DIS; 6839 6840 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 6841 dpll |= DPLLB_MODE_LVDS; 6842 else 6843 dpll |= DPLLB_MODE_DAC_SERIAL; 6844 6845 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 6846 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 6847 dpll |= (crtc_state->pixel_multiplier - 1) 6848 << SDVO_MULTIPLIER_SHIFT_HIRES; 6849 } 6850 6851 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 6852 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 6853 dpll |= DPLL_SDVO_HIGH_SPEED; 6854 6855 if (intel_crtc_has_dp_encoder(crtc_state)) 6856 dpll |= DPLL_SDVO_HIGH_SPEED; 6857 6858 /* compute bitmask from p1 value */ 6859 if (IS_PINEVIEW(dev_priv)) 6860 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 6861 else { 6862 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6863 if (IS_G4X(dev_priv) && reduced_clock) 6864 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 6865 } 6866 switch (clock->p2) { 6867 case 5: 6868 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 6869 break; 6870 case 7: 6871 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 6872 break; 6873 case 10: 6874 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 6875 break; 6876 case 14: 6877 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 6878 break; 6879 } 6880 if (INTEL_GEN(dev_priv) >= 4) 6881 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 6882 6883 if (crtc_state->sdvo_tv_clock) 6884 dpll |= PLL_REF_INPUT_TVCLKINBC; 6885 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6886 intel_panel_use_ssc(dev_priv)) 6887 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6888 else 6889 dpll |= PLL_REF_INPUT_DREFCLK; 6890 6891 dpll |= DPLL_VCO_ENABLE; 6892 crtc_state->dpll_hw_state.dpll = dpll; 6893 6894 if (INTEL_GEN(dev_priv) >= 4) { 6895 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 6896 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6897 crtc_state->dpll_hw_state.dpll_md = dpll_md; 6898 } 6899 } 6900 6901 static void i8xx_compute_dpll(struct intel_crtc *crtc, 6902 struct intel_crtc_state *crtc_state, 6903 struct dpll *reduced_clock) 6904 { 6905 struct drm_device *dev = crtc->base.dev; 6906 struct drm_i915_private *dev_priv = to_i915(dev); 6907 u32 dpll; 6908 struct dpll *clock = &crtc_state->dpll; 6909 6910 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6911 6912 dpll = DPLL_VGA_MODE_DIS; 6913 6914 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 6915 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6916 } else { 6917 if (clock->p1 == 2) 6918 dpll |= PLL_P1_DIVIDE_BY_TWO; 6919 else 6920 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6921 if (clock->p2 == 4) 6922 dpll |= PLL_P2_DIVIDE_BY_4; 6923 } 6924 6925 if (!IS_I830(dev_priv) && 6926 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 6927 dpll |= DPLL_DVO_2X_MODE; 6928 6929 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6930 intel_panel_use_ssc(dev_priv)) 6931 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6932 else 6933 dpll |= PLL_REF_INPUT_DREFCLK; 6934 6935 dpll |= DPLL_VCO_ENABLE; 6936 crtc_state->dpll_hw_state.dpll = dpll; 6937 } 6938 6939 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 6940 { 6941 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 6942 enum i915_pipe pipe = intel_crtc->pipe; 6943 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 6944 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 6945 uint32_t crtc_vtotal, crtc_vblank_end; 6946 int vsyncshift = 0; 6947 6948 /* We need to be careful not to changed the adjusted mode, for otherwise 6949 * the hw state checker will get angry at the mismatch. */ 6950 crtc_vtotal = adjusted_mode->crtc_vtotal; 6951 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 6952 6953 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 6954 /* the chip adds 2 halflines automatically */ 6955 crtc_vtotal -= 1; 6956 crtc_vblank_end -= 1; 6957 6958 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 6959 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6960 else 6961 vsyncshift = adjusted_mode->crtc_hsync_start - 6962 adjusted_mode->crtc_htotal / 2; 6963 if (vsyncshift < 0) 6964 vsyncshift += adjusted_mode->crtc_htotal; 6965 } 6966 6967 if (INTEL_GEN(dev_priv) > 3) 6968 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 6969 6970 I915_WRITE(HTOTAL(cpu_transcoder), 6971 (adjusted_mode->crtc_hdisplay - 1) | 6972 ((adjusted_mode->crtc_htotal - 1) << 16)); 6973 I915_WRITE(HBLANK(cpu_transcoder), 6974 (adjusted_mode->crtc_hblank_start - 1) | 6975 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 6976 I915_WRITE(HSYNC(cpu_transcoder), 6977 (adjusted_mode->crtc_hsync_start - 1) | 6978 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 6979 6980 I915_WRITE(VTOTAL(cpu_transcoder), 6981 (adjusted_mode->crtc_vdisplay - 1) | 6982 ((crtc_vtotal - 1) << 16)); 6983 I915_WRITE(VBLANK(cpu_transcoder), 6984 (adjusted_mode->crtc_vblank_start - 1) | 6985 ((crtc_vblank_end - 1) << 16)); 6986 I915_WRITE(VSYNC(cpu_transcoder), 6987 (adjusted_mode->crtc_vsync_start - 1) | 6988 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 6989 6990 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 6991 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 6992 * documented on the DDI_FUNC_CTL register description, EDP Input Select 6993 * bits. */ 6994 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 6995 (pipe == PIPE_B || pipe == PIPE_C)) 6996 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 6997 6998 } 6999 7000 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 7001 { 7002 struct drm_device *dev = intel_crtc->base.dev; 7003 struct drm_i915_private *dev_priv = to_i915(dev); 7004 enum i915_pipe pipe = intel_crtc->pipe; 7005 7006 /* pipesrc controls the size that is scaled from, which should 7007 * always be the user's requested size. 7008 */ 7009 I915_WRITE(PIPESRC(pipe), 7010 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7011 (intel_crtc->config->pipe_src_h - 1)); 7012 } 7013 7014 static void intel_get_pipe_timings(struct intel_crtc *crtc, 7015 struct intel_crtc_state *pipe_config) 7016 { 7017 struct drm_device *dev = crtc->base.dev; 7018 struct drm_i915_private *dev_priv = to_i915(dev); 7019 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 7020 uint32_t tmp; 7021 7022 tmp = I915_READ(HTOTAL(cpu_transcoder)); 7023 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 7024 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 7025 tmp = I915_READ(HBLANK(cpu_transcoder)); 7026 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 7027 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 7028 tmp = I915_READ(HSYNC(cpu_transcoder)); 7029 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 7030 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 7031 7032 tmp = I915_READ(VTOTAL(cpu_transcoder)); 7033 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 7034 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 7035 tmp = I915_READ(VBLANK(cpu_transcoder)); 7036 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 7037 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; 7038 tmp = I915_READ(VSYNC(cpu_transcoder)); 7039 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 7040 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 7041 7042 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 7043 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 7044 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 7045 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 7046 } 7047 } 7048 7049 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 7050 struct intel_crtc_state *pipe_config) 7051 { 7052 struct drm_device *dev = crtc->base.dev; 7053 struct drm_i915_private *dev_priv = to_i915(dev); 7054 u32 tmp; 7055 7056 tmp = I915_READ(PIPESRC(crtc->pipe)); 7057 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 7058 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 7059 7060 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 7061 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 7062 } 7063 7064 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 7065 struct intel_crtc_state *pipe_config) 7066 { 7067 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7068 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7069 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7070 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7071 7072 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7073 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7074 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7075 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7076 7077 mode->flags = pipe_config->base.adjusted_mode.flags; 7078 mode->type = DRM_MODE_TYPE_DRIVER; 7079 7080 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7081 7082 mode->hsync = drm_mode_hsync(mode); 7083 mode->vrefresh = drm_mode_vrefresh(mode); 7084 drm_mode_set_name(mode); 7085 } 7086 7087 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7088 { 7089 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7090 uint32_t pipeconf; 7091 7092 pipeconf = 0; 7093 7094 /* we keep both pipes enabled on 830 */ 7095 if (IS_I830(dev_priv)) 7096 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7097 7098 if (intel_crtc->config->double_wide) 7099 pipeconf |= PIPECONF_DOUBLE_WIDE; 7100 7101 /* only g4x and later have fancy bpc/dither controls */ 7102 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7103 IS_CHERRYVIEW(dev_priv)) { 7104 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7105 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7106 pipeconf |= PIPECONF_DITHER_EN | 7107 PIPECONF_DITHER_TYPE_SP; 7108 7109 switch (intel_crtc->config->pipe_bpp) { 7110 case 18: 7111 pipeconf |= PIPECONF_6BPC; 7112 break; 7113 case 24: 7114 pipeconf |= PIPECONF_8BPC; 7115 break; 7116 case 30: 7117 pipeconf |= PIPECONF_10BPC; 7118 break; 7119 default: 7120 /* Case prevented by intel_choose_pipe_bpp_dither. */ 7121 BUG(); 7122 } 7123 } 7124 7125 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7126 if (INTEL_GEN(dev_priv) < 4 || 7127 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7128 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7129 else 7130 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7131 } else 7132 pipeconf |= PIPECONF_PROGRESSIVE; 7133 7134 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7135 intel_crtc->config->limited_color_range) 7136 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7137 7138 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7139 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7140 } 7141 7142 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7143 struct intel_crtc_state *crtc_state) 7144 { 7145 struct drm_device *dev = crtc->base.dev; 7146 struct drm_i915_private *dev_priv = to_i915(dev); 7147 const struct intel_limit *limit; 7148 int refclk = 48000; 7149 7150 memset(&crtc_state->dpll_hw_state, 0, 7151 sizeof(crtc_state->dpll_hw_state)); 7152 7153 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7154 if (intel_panel_use_ssc(dev_priv)) { 7155 refclk = dev_priv->vbt.lvds_ssc_freq; 7156 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7157 } 7158 7159 limit = &intel_limits_i8xx_lvds; 7160 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 7161 limit = &intel_limits_i8xx_dvo; 7162 } else { 7163 limit = &intel_limits_i8xx_dac; 7164 } 7165 7166 if (!crtc_state->clock_set && 7167 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7168 refclk, NULL, &crtc_state->dpll)) { 7169 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7170 return -EINVAL; 7171 } 7172 7173 i8xx_compute_dpll(crtc, crtc_state, NULL); 7174 7175 return 0; 7176 } 7177 7178 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 7179 struct intel_crtc_state *crtc_state) 7180 { 7181 struct drm_device *dev = crtc->base.dev; 7182 struct drm_i915_private *dev_priv = to_i915(dev); 7183 const struct intel_limit *limit; 7184 int refclk = 96000; 7185 7186 memset(&crtc_state->dpll_hw_state, 0, 7187 sizeof(crtc_state->dpll_hw_state)); 7188 7189 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7190 if (intel_panel_use_ssc(dev_priv)) { 7191 refclk = dev_priv->vbt.lvds_ssc_freq; 7192 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7193 } 7194 7195 if (intel_is_dual_link_lvds(dev)) 7196 limit = &intel_limits_g4x_dual_channel_lvds; 7197 else 7198 limit = &intel_limits_g4x_single_channel_lvds; 7199 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 7200 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 7201 limit = &intel_limits_g4x_hdmi; 7202 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 7203 limit = &intel_limits_g4x_sdvo; 7204 } else { 7205 /* The option is for other outputs */ 7206 limit = &intel_limits_i9xx_sdvo; 7207 } 7208 7209 if (!crtc_state->clock_set && 7210 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7211 refclk, NULL, &crtc_state->dpll)) { 7212 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7213 return -EINVAL; 7214 } 7215 7216 i9xx_compute_dpll(crtc, crtc_state, NULL); 7217 7218 return 0; 7219 } 7220 7221 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 7222 struct intel_crtc_state *crtc_state) 7223 { 7224 struct drm_device *dev = crtc->base.dev; 7225 struct drm_i915_private *dev_priv = to_i915(dev); 7226 const struct intel_limit *limit; 7227 int refclk = 96000; 7228 7229 memset(&crtc_state->dpll_hw_state, 0, 7230 sizeof(crtc_state->dpll_hw_state)); 7231 7232 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7233 if (intel_panel_use_ssc(dev_priv)) { 7234 refclk = dev_priv->vbt.lvds_ssc_freq; 7235 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7236 } 7237 7238 limit = &intel_limits_pineview_lvds; 7239 } else { 7240 limit = &intel_limits_pineview_sdvo; 7241 } 7242 7243 if (!crtc_state->clock_set && 7244 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7245 refclk, NULL, &crtc_state->dpll)) { 7246 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7247 return -EINVAL; 7248 } 7249 7250 i9xx_compute_dpll(crtc, crtc_state, NULL); 7251 7252 return 0; 7253 } 7254 7255 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 7256 struct intel_crtc_state *crtc_state) 7257 { 7258 struct drm_device *dev = crtc->base.dev; 7259 struct drm_i915_private *dev_priv = to_i915(dev); 7260 const struct intel_limit *limit; 7261 int refclk = 96000; 7262 7263 memset(&crtc_state->dpll_hw_state, 0, 7264 sizeof(crtc_state->dpll_hw_state)); 7265 7266 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7267 if (intel_panel_use_ssc(dev_priv)) { 7268 refclk = dev_priv->vbt.lvds_ssc_freq; 7269 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7270 } 7271 7272 limit = &intel_limits_i9xx_lvds; 7273 } else { 7274 limit = &intel_limits_i9xx_sdvo; 7275 } 7276 7277 if (!crtc_state->clock_set && 7278 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7279 refclk, NULL, &crtc_state->dpll)) { 7280 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7281 return -EINVAL; 7282 } 7283 7284 i9xx_compute_dpll(crtc, crtc_state, NULL); 7285 7286 return 0; 7287 } 7288 7289 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 7290 struct intel_crtc_state *crtc_state) 7291 { 7292 int refclk = 100000; 7293 const struct intel_limit *limit = &intel_limits_chv; 7294 7295 memset(&crtc_state->dpll_hw_state, 0, 7296 sizeof(crtc_state->dpll_hw_state)); 7297 7298 if (!crtc_state->clock_set && 7299 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7300 refclk, NULL, &crtc_state->dpll)) { 7301 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7302 return -EINVAL; 7303 } 7304 7305 chv_compute_dpll(crtc, crtc_state); 7306 7307 return 0; 7308 } 7309 7310 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 7311 struct intel_crtc_state *crtc_state) 7312 { 7313 int refclk = 100000; 7314 const struct intel_limit *limit = &intel_limits_vlv; 7315 7316 memset(&crtc_state->dpll_hw_state, 0, 7317 sizeof(crtc_state->dpll_hw_state)); 7318 7319 if (!crtc_state->clock_set && 7320 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7321 refclk, NULL, &crtc_state->dpll)) { 7322 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7323 return -EINVAL; 7324 } 7325 7326 vlv_compute_dpll(crtc, crtc_state); 7327 7328 return 0; 7329 } 7330 7331 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 7332 struct intel_crtc_state *pipe_config) 7333 { 7334 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7335 uint32_t tmp; 7336 7337 if (INTEL_GEN(dev_priv) <= 3 && 7338 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) 7339 return; 7340 7341 tmp = I915_READ(PFIT_CONTROL); 7342 if (!(tmp & PFIT_ENABLE)) 7343 return; 7344 7345 /* Check whether the pfit is attached to our pipe. */ 7346 if (INTEL_GEN(dev_priv) < 4) { 7347 if (crtc->pipe != PIPE_B) 7348 return; 7349 } else { 7350 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 7351 return; 7352 } 7353 7354 pipe_config->gmch_pfit.control = tmp; 7355 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 7356 } 7357 7358 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 7359 struct intel_crtc_state *pipe_config) 7360 { 7361 struct drm_device *dev = crtc->base.dev; 7362 struct drm_i915_private *dev_priv = to_i915(dev); 7363 int pipe = pipe_config->cpu_transcoder; 7364 struct dpll clock; 7365 u32 mdiv; 7366 int refclk = 100000; 7367 7368 /* In case of DSI, DPLL will not be used */ 7369 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7370 return; 7371 7372 mutex_lock(&dev_priv->sb_lock); 7373 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 7374 mutex_unlock(&dev_priv->sb_lock); 7375 7376 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 7377 clock.m2 = mdiv & DPIO_M2DIV_MASK; 7378 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 7379 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 7380 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 7381 7382 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 7383 } 7384 7385 static void 7386 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 7387 struct intel_initial_plane_config *plane_config) 7388 { 7389 struct drm_device *dev = crtc->base.dev; 7390 struct drm_i915_private *dev_priv = to_i915(dev); 7391 u32 val, base, offset; 7392 int pipe = crtc->pipe, plane = crtc->plane; 7393 int fourcc, pixel_format; 7394 unsigned int aligned_height; 7395 struct drm_framebuffer *fb; 7396 struct intel_framebuffer *intel_fb; 7397 7398 val = I915_READ(DSPCNTR(plane)); 7399 if (!(val & DISPLAY_PLANE_ENABLE)) 7400 return; 7401 7402 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7403 if (!intel_fb) { 7404 DRM_DEBUG_KMS("failed to alloc fb\n"); 7405 return; 7406 } 7407 7408 fb = &intel_fb->base; 7409 7410 fb->dev = dev; 7411 7412 if (INTEL_GEN(dev_priv) >= 4) { 7413 if (val & DISPPLANE_TILED) { 7414 plane_config->tiling = I915_TILING_X; 7415 fb->modifier = I915_FORMAT_MOD_X_TILED; 7416 } 7417 } 7418 7419 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7420 fourcc = i9xx_format_to_fourcc(pixel_format); 7421 fb->format = drm_format_info(fourcc); 7422 7423 if (INTEL_GEN(dev_priv) >= 4) { 7424 if (plane_config->tiling) 7425 offset = I915_READ(DSPTILEOFF(plane)); 7426 else 7427 offset = I915_READ(DSPLINOFF(plane)); 7428 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 7429 } else { 7430 base = I915_READ(DSPADDR(plane)); 7431 } 7432 plane_config->base = base; 7433 7434 val = I915_READ(PIPESRC(pipe)); 7435 fb->width = ((val >> 16) & 0xfff) + 1; 7436 fb->height = ((val >> 0) & 0xfff) + 1; 7437 7438 val = I915_READ(DSPSTRIDE(pipe)); 7439 fb->pitches[0] = val & 0xffffffc0; 7440 7441 aligned_height = intel_fb_align_height(fb, 0, fb->height); 7442 7443 plane_config->size = fb->pitches[0] * aligned_height; 7444 7445 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7446 pipe_name(pipe), plane, fb->width, fb->height, 7447 fb->format->cpp[0] * 8, base, fb->pitches[0], 7448 plane_config->size); 7449 7450 plane_config->fb = intel_fb; 7451 } 7452 7453 static void chv_crtc_clock_get(struct intel_crtc *crtc, 7454 struct intel_crtc_state *pipe_config) 7455 { 7456 struct drm_device *dev = crtc->base.dev; 7457 struct drm_i915_private *dev_priv = to_i915(dev); 7458 int pipe = pipe_config->cpu_transcoder; 7459 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7460 struct dpll clock; 7461 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 7462 int refclk = 100000; 7463 7464 /* In case of DSI, DPLL will not be used */ 7465 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7466 return; 7467 7468 mutex_lock(&dev_priv->sb_lock); 7469 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 7470 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 7471 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 7472 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 7473 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7474 mutex_unlock(&dev_priv->sb_lock); 7475 7476 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 7477 clock.m2 = (pll_dw0 & 0xff) << 22; 7478 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 7479 clock.m2 |= pll_dw2 & 0x3fffff; 7480 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 7481 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 7482 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 7483 7484 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 7485 } 7486 7487 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 7488 struct intel_crtc_state *pipe_config) 7489 { 7490 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7491 enum intel_display_power_domain power_domain; 7492 uint32_t tmp; 7493 bool ret; 7494 7495 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 7496 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 7497 return false; 7498 7499 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7500 pipe_config->shared_dpll = NULL; 7501 7502 ret = false; 7503 7504 tmp = I915_READ(PIPECONF(crtc->pipe)); 7505 if (!(tmp & PIPECONF_ENABLE)) 7506 goto out; 7507 7508 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7509 IS_CHERRYVIEW(dev_priv)) { 7510 switch (tmp & PIPECONF_BPC_MASK) { 7511 case PIPECONF_6BPC: 7512 pipe_config->pipe_bpp = 18; 7513 break; 7514 case PIPECONF_8BPC: 7515 pipe_config->pipe_bpp = 24; 7516 break; 7517 case PIPECONF_10BPC: 7518 pipe_config->pipe_bpp = 30; 7519 break; 7520 default: 7521 break; 7522 } 7523 } 7524 7525 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7526 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 7527 pipe_config->limited_color_range = true; 7528 7529 if (INTEL_GEN(dev_priv) < 4) 7530 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 7531 7532 intel_get_pipe_timings(crtc, pipe_config); 7533 intel_get_pipe_src_size(crtc, pipe_config); 7534 7535 i9xx_get_pfit_config(crtc, pipe_config); 7536 7537 if (INTEL_GEN(dev_priv) >= 4) { 7538 /* No way to read it out on pipes B and C */ 7539 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 7540 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 7541 else 7542 tmp = I915_READ(DPLL_MD(crtc->pipe)); 7543 pipe_config->pixel_multiplier = 7544 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 7545 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 7546 pipe_config->dpll_hw_state.dpll_md = tmp; 7547 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 7548 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 7549 tmp = I915_READ(DPLL(crtc->pipe)); 7550 pipe_config->pixel_multiplier = 7551 ((tmp & SDVO_MULTIPLIER_MASK) 7552 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 7553 } else { 7554 /* Note that on i915G/GM the pixel multiplier is in the sdvo 7555 * port and will be fixed up in the encoder->get_config 7556 * function. */ 7557 pipe_config->pixel_multiplier = 1; 7558 } 7559 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 7560 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 7561 /* 7562 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 7563 * on 830. Filter it out here so that we don't 7564 * report errors due to that. 7565 */ 7566 if (IS_I830(dev_priv)) 7567 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 7568 7569 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 7570 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 7571 } else { 7572 /* Mask out read-only status bits. */ 7573 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 7574 DPLL_PORTC_READY_MASK | 7575 DPLL_PORTB_READY_MASK); 7576 } 7577 7578 if (IS_CHERRYVIEW(dev_priv)) 7579 chv_crtc_clock_get(crtc, pipe_config); 7580 else if (IS_VALLEYVIEW(dev_priv)) 7581 vlv_crtc_clock_get(crtc, pipe_config); 7582 else 7583 i9xx_crtc_clock_get(crtc, pipe_config); 7584 7585 /* 7586 * Normally the dotclock is filled in by the encoder .get_config() 7587 * but in case the pipe is enabled w/o any ports we need a sane 7588 * default. 7589 */ 7590 pipe_config->base.adjusted_mode.crtc_clock = 7591 pipe_config->port_clock / pipe_config->pixel_multiplier; 7592 7593 ret = true; 7594 7595 out: 7596 intel_display_power_put(dev_priv, power_domain); 7597 7598 return ret; 7599 } 7600 7601 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 7602 { 7603 struct intel_encoder *encoder; 7604 int i; 7605 u32 val, final; 7606 bool has_lvds = false; 7607 bool has_cpu_edp = false; 7608 bool has_panel = false; 7609 bool has_ck505 = false; 7610 bool can_ssc = false; 7611 bool using_ssc_source = false; 7612 7613 /* We need to take the global config into account */ 7614 for_each_intel_encoder(&dev_priv->drm, encoder) { 7615 switch (encoder->type) { 7616 case INTEL_OUTPUT_LVDS: 7617 has_panel = true; 7618 has_lvds = true; 7619 break; 7620 case INTEL_OUTPUT_EDP: 7621 has_panel = true; 7622 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 7623 has_cpu_edp = true; 7624 break; 7625 default: 7626 break; 7627 } 7628 } 7629 7630 if (HAS_PCH_IBX(dev_priv)) { 7631 has_ck505 = dev_priv->vbt.display_clock_mode; 7632 can_ssc = has_ck505; 7633 } else { 7634 has_ck505 = false; 7635 can_ssc = true; 7636 } 7637 7638 /* Check if any DPLLs are using the SSC source */ 7639 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 7640 u32 temp = I915_READ(PCH_DPLL(i)); 7641 7642 if (!(temp & DPLL_VCO_ENABLE)) 7643 continue; 7644 7645 if ((temp & PLL_REF_INPUT_MASK) == 7646 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 7647 using_ssc_source = true; 7648 break; 7649 } 7650 } 7651 7652 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 7653 has_panel, has_lvds, has_ck505, using_ssc_source); 7654 7655 /* Ironlake: try to setup display ref clock before DPLL 7656 * enabling. This is only under driver's control after 7657 * PCH B stepping, previous chipset stepping should be 7658 * ignoring this setting. 7659 */ 7660 val = I915_READ(PCH_DREF_CONTROL); 7661 7662 /* As we must carefully and slowly disable/enable each source in turn, 7663 * compute the final state we want first and check if we need to 7664 * make any changes at all. 7665 */ 7666 final = val; 7667 final &= ~DREF_NONSPREAD_SOURCE_MASK; 7668 if (has_ck505) 7669 final |= DREF_NONSPREAD_CK505_ENABLE; 7670 else 7671 final |= DREF_NONSPREAD_SOURCE_ENABLE; 7672 7673 final &= ~DREF_SSC_SOURCE_MASK; 7674 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7675 final &= ~DREF_SSC1_ENABLE; 7676 7677 if (has_panel) { 7678 final |= DREF_SSC_SOURCE_ENABLE; 7679 7680 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7681 final |= DREF_SSC1_ENABLE; 7682 7683 if (has_cpu_edp) { 7684 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7685 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7686 else 7687 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7688 } else 7689 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7690 } else if (using_ssc_source) { 7691 final |= DREF_SSC_SOURCE_ENABLE; 7692 final |= DREF_SSC1_ENABLE; 7693 } 7694 7695 if (final == val) 7696 return; 7697 7698 /* Always enable nonspread source */ 7699 val &= ~DREF_NONSPREAD_SOURCE_MASK; 7700 7701 if (has_ck505) 7702 val |= DREF_NONSPREAD_CK505_ENABLE; 7703 else 7704 val |= DREF_NONSPREAD_SOURCE_ENABLE; 7705 7706 if (has_panel) { 7707 val &= ~DREF_SSC_SOURCE_MASK; 7708 val |= DREF_SSC_SOURCE_ENABLE; 7709 7710 /* SSC must be turned on before enabling the CPU output */ 7711 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7712 DRM_DEBUG_KMS("Using SSC on panel\n"); 7713 val |= DREF_SSC1_ENABLE; 7714 } else 7715 val &= ~DREF_SSC1_ENABLE; 7716 7717 /* Get SSC going before enabling the outputs */ 7718 I915_WRITE(PCH_DREF_CONTROL, val); 7719 POSTING_READ(PCH_DREF_CONTROL); 7720 udelay(200); 7721 7722 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7723 7724 /* Enable CPU source on CPU attached eDP */ 7725 if (has_cpu_edp) { 7726 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7727 DRM_DEBUG_KMS("Using SSC on eDP\n"); 7728 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7729 } else 7730 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7731 } else 7732 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7733 7734 I915_WRITE(PCH_DREF_CONTROL, val); 7735 POSTING_READ(PCH_DREF_CONTROL); 7736 udelay(200); 7737 } else { 7738 DRM_DEBUG_KMS("Disabling CPU source output\n"); 7739 7740 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7741 7742 /* Turn off CPU output */ 7743 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7744 7745 I915_WRITE(PCH_DREF_CONTROL, val); 7746 POSTING_READ(PCH_DREF_CONTROL); 7747 udelay(200); 7748 7749 if (!using_ssc_source) { 7750 DRM_DEBUG_KMS("Disabling SSC source\n"); 7751 7752 /* Turn off the SSC source */ 7753 val &= ~DREF_SSC_SOURCE_MASK; 7754 val |= DREF_SSC_SOURCE_DISABLE; 7755 7756 /* Turn off SSC1 */ 7757 val &= ~DREF_SSC1_ENABLE; 7758 7759 I915_WRITE(PCH_DREF_CONTROL, val); 7760 POSTING_READ(PCH_DREF_CONTROL); 7761 udelay(200); 7762 } 7763 } 7764 7765 BUG_ON(val != final); 7766 } 7767 7768 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 7769 { 7770 uint32_t tmp; 7771 7772 tmp = I915_READ(SOUTH_CHICKEN2); 7773 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 7774 I915_WRITE(SOUTH_CHICKEN2, tmp); 7775 7776 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 7777 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 7778 DRM_ERROR("FDI mPHY reset assert timeout\n"); 7779 7780 tmp = I915_READ(SOUTH_CHICKEN2); 7781 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 7782 I915_WRITE(SOUTH_CHICKEN2, tmp); 7783 7784 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 7785 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 7786 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 7787 } 7788 7789 /* WaMPhyProgramming:hsw */ 7790 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 7791 { 7792 uint32_t tmp; 7793 7794 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 7795 tmp &= ~(0xFF << 24); 7796 tmp |= (0x12 << 24); 7797 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 7798 7799 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 7800 tmp |= (1 << 11); 7801 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 7802 7803 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 7804 tmp |= (1 << 11); 7805 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 7806 7807 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 7808 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7809 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 7810 7811 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 7812 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7813 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 7814 7815 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 7816 tmp &= ~(7 << 13); 7817 tmp |= (5 << 13); 7818 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 7819 7820 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 7821 tmp &= ~(7 << 13); 7822 tmp |= (5 << 13); 7823 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 7824 7825 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 7826 tmp &= ~0xFF; 7827 tmp |= 0x1C; 7828 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 7829 7830 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 7831 tmp &= ~0xFF; 7832 tmp |= 0x1C; 7833 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 7834 7835 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 7836 tmp &= ~(0xFF << 16); 7837 tmp |= (0x1C << 16); 7838 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 7839 7840 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 7841 tmp &= ~(0xFF << 16); 7842 tmp |= (0x1C << 16); 7843 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 7844 7845 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 7846 tmp |= (1 << 27); 7847 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 7848 7849 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 7850 tmp |= (1 << 27); 7851 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 7852 7853 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 7854 tmp &= ~(0xF << 28); 7855 tmp |= (4 << 28); 7856 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 7857 7858 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 7859 tmp &= ~(0xF << 28); 7860 tmp |= (4 << 28); 7861 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 7862 } 7863 7864 /* Implements 3 different sequences from BSpec chapter "Display iCLK 7865 * Programming" based on the parameters passed: 7866 * - Sequence to enable CLKOUT_DP 7867 * - Sequence to enable CLKOUT_DP without spread 7868 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 7869 */ 7870 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 7871 bool with_spread, bool with_fdi) 7872 { 7873 uint32_t reg, tmp; 7874 7875 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 7876 with_spread = true; 7877 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 7878 with_fdi, "LP PCH doesn't have FDI\n")) 7879 with_fdi = false; 7880 7881 mutex_lock(&dev_priv->sb_lock); 7882 7883 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7884 tmp &= ~SBI_SSCCTL_DISABLE; 7885 tmp |= SBI_SSCCTL_PATHALT; 7886 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7887 7888 udelay(24); 7889 7890 if (with_spread) { 7891 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7892 tmp &= ~SBI_SSCCTL_PATHALT; 7893 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7894 7895 if (with_fdi) { 7896 lpt_reset_fdi_mphy(dev_priv); 7897 lpt_program_fdi_mphy(dev_priv); 7898 } 7899 } 7900 7901 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7902 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7903 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7904 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7905 7906 mutex_unlock(&dev_priv->sb_lock); 7907 } 7908 7909 /* Sequence to disable CLKOUT_DP */ 7910 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 7911 { 7912 uint32_t reg, tmp; 7913 7914 mutex_lock(&dev_priv->sb_lock); 7915 7916 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7917 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7918 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7919 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7920 7921 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7922 if (!(tmp & SBI_SSCCTL_DISABLE)) { 7923 if (!(tmp & SBI_SSCCTL_PATHALT)) { 7924 tmp |= SBI_SSCCTL_PATHALT; 7925 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7926 udelay(32); 7927 } 7928 tmp |= SBI_SSCCTL_DISABLE; 7929 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7930 } 7931 7932 mutex_unlock(&dev_priv->sb_lock); 7933 } 7934 7935 #define BEND_IDX(steps) ((50 + (steps)) / 5) 7936 7937 static const uint16_t sscdivintphase[] = { 7938 [BEND_IDX( 50)] = 0x3B23, 7939 [BEND_IDX( 45)] = 0x3B23, 7940 [BEND_IDX( 40)] = 0x3C23, 7941 [BEND_IDX( 35)] = 0x3C23, 7942 [BEND_IDX( 30)] = 0x3D23, 7943 [BEND_IDX( 25)] = 0x3D23, 7944 [BEND_IDX( 20)] = 0x3E23, 7945 [BEND_IDX( 15)] = 0x3E23, 7946 [BEND_IDX( 10)] = 0x3F23, 7947 [BEND_IDX( 5)] = 0x3F23, 7948 [BEND_IDX( 0)] = 0x0025, 7949 [BEND_IDX( -5)] = 0x0025, 7950 [BEND_IDX(-10)] = 0x0125, 7951 [BEND_IDX(-15)] = 0x0125, 7952 [BEND_IDX(-20)] = 0x0225, 7953 [BEND_IDX(-25)] = 0x0225, 7954 [BEND_IDX(-30)] = 0x0325, 7955 [BEND_IDX(-35)] = 0x0325, 7956 [BEND_IDX(-40)] = 0x0425, 7957 [BEND_IDX(-45)] = 0x0425, 7958 [BEND_IDX(-50)] = 0x0525, 7959 }; 7960 7961 /* 7962 * Bend CLKOUT_DP 7963 * steps -50 to 50 inclusive, in steps of 5 7964 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 7965 * change in clock period = -(steps / 10) * 5.787 ps 7966 */ 7967 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 7968 { 7969 uint32_t tmp; 7970 int idx = BEND_IDX(steps); 7971 7972 if (WARN_ON(steps % 5 != 0)) 7973 return; 7974 7975 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 7976 return; 7977 7978 mutex_lock(&dev_priv->sb_lock); 7979 7980 if (steps % 10 != 0) 7981 tmp = 0xAAAAAAAB; 7982 else 7983 tmp = 0x00000000; 7984 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 7985 7986 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 7987 tmp &= 0xffff0000; 7988 tmp |= sscdivintphase[idx]; 7989 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 7990 7991 mutex_unlock(&dev_priv->sb_lock); 7992 } 7993 7994 #undef BEND_IDX 7995 7996 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 7997 { 7998 struct intel_encoder *encoder; 7999 bool has_vga = false; 8000 8001 for_each_intel_encoder(&dev_priv->drm, encoder) { 8002 switch (encoder->type) { 8003 case INTEL_OUTPUT_ANALOG: 8004 has_vga = true; 8005 break; 8006 default: 8007 break; 8008 } 8009 } 8010 8011 if (has_vga) { 8012 lpt_bend_clkout_dp(dev_priv, 0); 8013 lpt_enable_clkout_dp(dev_priv, true, true); 8014 } else { 8015 lpt_disable_clkout_dp(dev_priv); 8016 } 8017 } 8018 8019 /* 8020 * Initialize reference clocks when the driver loads 8021 */ 8022 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 8023 { 8024 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 8025 ironlake_init_pch_refclk(dev_priv); 8026 else if (HAS_PCH_LPT(dev_priv)) 8027 lpt_init_pch_refclk(dev_priv); 8028 } 8029 8030 static void ironlake_set_pipeconf(struct drm_crtc *crtc) 8031 { 8032 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8034 int pipe = intel_crtc->pipe; 8035 uint32_t val; 8036 8037 val = 0; 8038 8039 switch (intel_crtc->config->pipe_bpp) { 8040 case 18: 8041 val |= PIPECONF_6BPC; 8042 break; 8043 case 24: 8044 val |= PIPECONF_8BPC; 8045 break; 8046 case 30: 8047 val |= PIPECONF_10BPC; 8048 break; 8049 case 36: 8050 val |= PIPECONF_12BPC; 8051 break; 8052 default: 8053 /* Case prevented by intel_choose_pipe_bpp_dither. */ 8054 BUG(); 8055 } 8056 8057 if (intel_crtc->config->dither) 8058 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8059 8060 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8061 val |= PIPECONF_INTERLACED_ILK; 8062 else 8063 val |= PIPECONF_PROGRESSIVE; 8064 8065 if (intel_crtc->config->limited_color_range) 8066 val |= PIPECONF_COLOR_RANGE_SELECT; 8067 8068 I915_WRITE(PIPECONF(pipe), val); 8069 POSTING_READ(PIPECONF(pipe)); 8070 } 8071 8072 static void haswell_set_pipeconf(struct drm_crtc *crtc) 8073 { 8074 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8075 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8076 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8077 u32 val = 0; 8078 8079 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) 8080 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8081 8082 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8083 val |= PIPECONF_INTERLACED_ILK; 8084 else 8085 val |= PIPECONF_PROGRESSIVE; 8086 8087 I915_WRITE(PIPECONF(cpu_transcoder), val); 8088 POSTING_READ(PIPECONF(cpu_transcoder)); 8089 } 8090 8091 static void haswell_set_pipemisc(struct drm_crtc *crtc) 8092 { 8093 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8094 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8095 struct intel_crtc_state *config = intel_crtc->config; 8096 8097 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8098 u32 val = 0; 8099 8100 switch (intel_crtc->config->pipe_bpp) { 8101 case 18: 8102 val |= PIPEMISC_DITHER_6_BPC; 8103 break; 8104 case 24: 8105 val |= PIPEMISC_DITHER_8_BPC; 8106 break; 8107 case 30: 8108 val |= PIPEMISC_DITHER_10_BPC; 8109 break; 8110 case 36: 8111 val |= PIPEMISC_DITHER_12_BPC; 8112 break; 8113 default: 8114 /* Case prevented by pipe_config_set_bpp. */ 8115 BUG(); 8116 } 8117 8118 if (intel_crtc->config->dither) 8119 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8120 8121 if (config->ycbcr420) { 8122 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV | 8123 PIPEMISC_YUV420_ENABLE | 8124 PIPEMISC_YUV420_MODE_FULL_BLEND; 8125 } 8126 8127 I915_WRITE(PIPEMISC(intel_crtc->pipe), val); 8128 } 8129 } 8130 8131 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 8132 { 8133 /* 8134 * Account for spread spectrum to avoid 8135 * oversubscribing the link. Max center spread 8136 * is 2.5%; use 5% for safety's sake. 8137 */ 8138 u32 bps = target_clock * bpp * 21 / 20; 8139 return DIV_ROUND_UP(bps, link_bw * 8); 8140 } 8141 8142 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 8143 { 8144 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 8145 } 8146 8147 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8148 struct intel_crtc_state *crtc_state, 8149 struct dpll *reduced_clock) 8150 { 8151 struct drm_crtc *crtc = &intel_crtc->base; 8152 struct drm_device *dev = crtc->dev; 8153 struct drm_i915_private *dev_priv = to_i915(dev); 8154 u32 dpll, fp, fp2; 8155 int factor; 8156 8157 /* Enable autotuning of the PLL clock (if permissible) */ 8158 factor = 21; 8159 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8160 if ((intel_panel_use_ssc(dev_priv) && 8161 dev_priv->vbt.lvds_ssc_freq == 100000) || 8162 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev))) 8163 factor = 25; 8164 } else if (crtc_state->sdvo_tv_clock) 8165 factor = 20; 8166 8167 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8168 8169 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 8170 fp |= FP_CB_TUNE; 8171 8172 if (reduced_clock) { 8173 fp2 = i9xx_dpll_compute_fp(reduced_clock); 8174 8175 if (reduced_clock->m < factor * reduced_clock->n) 8176 fp2 |= FP_CB_TUNE; 8177 } else { 8178 fp2 = fp; 8179 } 8180 8181 dpll = 0; 8182 8183 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8184 dpll |= DPLLB_MODE_LVDS; 8185 else 8186 dpll |= DPLLB_MODE_DAC_SERIAL; 8187 8188 dpll |= (crtc_state->pixel_multiplier - 1) 8189 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8190 8191 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8192 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8193 dpll |= DPLL_SDVO_HIGH_SPEED; 8194 8195 if (intel_crtc_has_dp_encoder(crtc_state)) 8196 dpll |= DPLL_SDVO_HIGH_SPEED; 8197 8198 /* 8199 * The high speed IO clock is only really required for 8200 * SDVO/HDMI/DP, but we also enable it for CRT to make it 8201 * possible to share the DPLL between CRT and HDMI. Enabling 8202 * the clock needlessly does no real harm, except use up a 8203 * bit of power potentially. 8204 * 8205 * We'll limit this to IVB with 3 pipes, since it has only two 8206 * DPLLs and so DPLL sharing is the only way to get three pipes 8207 * driving PCH ports at the same time. On SNB we could do this, 8208 * and potentially avoid enabling the second DPLL, but it's not 8209 * clear if it''s a win or loss power wise. No point in doing 8210 * this on ILK at all since it has a fixed DPLL<->pipe mapping. 8211 */ 8212 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 8213 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 8214 dpll |= DPLL_SDVO_HIGH_SPEED; 8215 8216 /* compute bitmask from p1 value */ 8217 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8218 /* also FPA1 */ 8219 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8220 8221 switch (crtc_state->dpll.p2) { 8222 case 5: 8223 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8224 break; 8225 case 7: 8226 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8227 break; 8228 case 10: 8229 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8230 break; 8231 case 14: 8232 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8233 break; 8234 } 8235 8236 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8237 intel_panel_use_ssc(dev_priv)) 8238 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8239 else 8240 dpll |= PLL_REF_INPUT_DREFCLK; 8241 8242 dpll |= DPLL_VCO_ENABLE; 8243 8244 crtc_state->dpll_hw_state.dpll = dpll; 8245 crtc_state->dpll_hw_state.fp0 = fp; 8246 crtc_state->dpll_hw_state.fp1 = fp2; 8247 } 8248 8249 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 8250 struct intel_crtc_state *crtc_state) 8251 { 8252 struct drm_device *dev = crtc->base.dev; 8253 struct drm_i915_private *dev_priv = to_i915(dev); 8254 const struct intel_limit *limit; 8255 int refclk = 120000; 8256 8257 memset(&crtc_state->dpll_hw_state, 0, 8258 sizeof(crtc_state->dpll_hw_state)); 8259 8260 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 8261 if (!crtc_state->has_pch_encoder) 8262 return 0; 8263 8264 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8265 if (intel_panel_use_ssc(dev_priv)) { 8266 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 8267 dev_priv->vbt.lvds_ssc_freq); 8268 refclk = dev_priv->vbt.lvds_ssc_freq; 8269 } 8270 8271 if (intel_is_dual_link_lvds(dev)) { 8272 if (refclk == 100000) 8273 limit = &intel_limits_ironlake_dual_lvds_100m; 8274 else 8275 limit = &intel_limits_ironlake_dual_lvds; 8276 } else { 8277 if (refclk == 100000) 8278 limit = &intel_limits_ironlake_single_lvds_100m; 8279 else 8280 limit = &intel_limits_ironlake_single_lvds; 8281 } 8282 } else { 8283 limit = &intel_limits_ironlake_dac; 8284 } 8285 8286 if (!crtc_state->clock_set && 8287 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8288 refclk, NULL, &crtc_state->dpll)) { 8289 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8290 return -EINVAL; 8291 } 8292 8293 ironlake_compute_dpll(crtc, crtc_state, NULL); 8294 8295 if (!intel_get_shared_dpll(crtc, crtc_state, NULL)) { 8296 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8297 pipe_name(crtc->pipe)); 8298 return -EINVAL; 8299 } 8300 8301 return 0; 8302 } 8303 8304 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 8305 struct intel_link_m_n *m_n) 8306 { 8307 struct drm_device *dev = crtc->base.dev; 8308 struct drm_i915_private *dev_priv = to_i915(dev); 8309 enum i915_pipe pipe = crtc->pipe; 8310 8311 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 8312 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 8313 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 8314 & ~TU_SIZE_MASK; 8315 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 8316 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 8317 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8318 } 8319 8320 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 8321 enum transcoder transcoder, 8322 struct intel_link_m_n *m_n, 8323 struct intel_link_m_n *m2_n2) 8324 { 8325 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8326 enum i915_pipe pipe = crtc->pipe; 8327 8328 if (INTEL_GEN(dev_priv) >= 5) { 8329 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 8330 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 8331 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 8332 & ~TU_SIZE_MASK; 8333 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 8334 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 8335 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8336 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 8337 * gen < 8) and if DRRS is supported (to make sure the 8338 * registers are not unnecessarily read). 8339 */ 8340 if (m2_n2 && INTEL_GEN(dev_priv) < 8 && 8341 crtc->config->has_drrs) { 8342 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 8343 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 8344 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 8345 & ~TU_SIZE_MASK; 8346 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 8347 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 8348 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8349 } 8350 } else { 8351 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 8352 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 8353 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 8354 & ~TU_SIZE_MASK; 8355 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 8356 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 8357 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8358 } 8359 } 8360 8361 void intel_dp_get_m_n(struct intel_crtc *crtc, 8362 struct intel_crtc_state *pipe_config) 8363 { 8364 if (pipe_config->has_pch_encoder) 8365 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 8366 else 8367 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8368 &pipe_config->dp_m_n, 8369 &pipe_config->dp_m2_n2); 8370 } 8371 8372 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 8373 struct intel_crtc_state *pipe_config) 8374 { 8375 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8376 &pipe_config->fdi_m_n, NULL); 8377 } 8378 8379 static void skylake_get_pfit_config(struct intel_crtc *crtc, 8380 struct intel_crtc_state *pipe_config) 8381 { 8382 struct drm_device *dev = crtc->base.dev; 8383 struct drm_i915_private *dev_priv = to_i915(dev); 8384 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 8385 uint32_t ps_ctrl = 0; 8386 int id = -1; 8387 int i; 8388 8389 /* find scaler attached to this pipe */ 8390 for (i = 0; i < crtc->num_scalers; i++) { 8391 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 8392 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 8393 id = i; 8394 pipe_config->pch_pfit.enabled = true; 8395 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 8396 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 8397 break; 8398 } 8399 } 8400 8401 scaler_state->scaler_id = id; 8402 if (id >= 0) { 8403 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 8404 } else { 8405 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 8406 } 8407 } 8408 8409 static void 8410 skylake_get_initial_plane_config(struct intel_crtc *crtc, 8411 struct intel_initial_plane_config *plane_config) 8412 { 8413 struct drm_device *dev = crtc->base.dev; 8414 struct drm_i915_private *dev_priv = to_i915(dev); 8415 u32 val, base, offset, stride_mult, tiling; 8416 int pipe = crtc->pipe; 8417 int fourcc, pixel_format; 8418 unsigned int aligned_height; 8419 struct drm_framebuffer *fb; 8420 struct intel_framebuffer *intel_fb; 8421 8422 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8423 if (!intel_fb) { 8424 DRM_DEBUG_KMS("failed to alloc fb\n"); 8425 return; 8426 } 8427 8428 fb = &intel_fb->base; 8429 8430 fb->dev = dev; 8431 8432 val = I915_READ(PLANE_CTL(pipe, 0)); 8433 if (!(val & PLANE_CTL_ENABLE)) 8434 goto error; 8435 8436 pixel_format = val & PLANE_CTL_FORMAT_MASK; 8437 fourcc = skl_format_to_fourcc(pixel_format, 8438 val & PLANE_CTL_ORDER_RGBX, 8439 val & PLANE_CTL_ALPHA_MASK); 8440 fb->format = drm_format_info(fourcc); 8441 8442 tiling = val & PLANE_CTL_TILED_MASK; 8443 switch (tiling) { 8444 case PLANE_CTL_TILED_LINEAR: 8445 fb->modifier = DRM_FORMAT_MOD_LINEAR; 8446 break; 8447 case PLANE_CTL_TILED_X: 8448 plane_config->tiling = I915_TILING_X; 8449 fb->modifier = I915_FORMAT_MOD_X_TILED; 8450 break; 8451 case PLANE_CTL_TILED_Y: 8452 if (val & PLANE_CTL_DECOMPRESSION_ENABLE) 8453 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS; 8454 else 8455 fb->modifier = I915_FORMAT_MOD_Y_TILED; 8456 break; 8457 case PLANE_CTL_TILED_YF: 8458 if (val & PLANE_CTL_DECOMPRESSION_ENABLE) 8459 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS; 8460 else 8461 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 8462 break; 8463 default: 8464 MISSING_CASE(tiling); 8465 goto error; 8466 } 8467 8468 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; 8469 plane_config->base = base; 8470 8471 offset = I915_READ(PLANE_OFFSET(pipe, 0)); 8472 8473 val = I915_READ(PLANE_SIZE(pipe, 0)); 8474 fb->height = ((val >> 16) & 0xfff) + 1; 8475 fb->width = ((val >> 0) & 0x1fff) + 1; 8476 8477 val = I915_READ(PLANE_STRIDE(pipe, 0)); 8478 stride_mult = intel_fb_stride_alignment(fb, 0); 8479 fb->pitches[0] = (val & 0x3ff) * stride_mult; 8480 8481 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8482 8483 plane_config->size = fb->pitches[0] * aligned_height; 8484 8485 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8486 pipe_name(pipe), fb->width, fb->height, 8487 fb->format->cpp[0] * 8, base, fb->pitches[0], 8488 plane_config->size); 8489 8490 plane_config->fb = intel_fb; 8491 return; 8492 8493 error: 8494 kfree(intel_fb); 8495 } 8496 8497 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 8498 struct intel_crtc_state *pipe_config) 8499 { 8500 struct drm_device *dev = crtc->base.dev; 8501 struct drm_i915_private *dev_priv = to_i915(dev); 8502 uint32_t tmp; 8503 8504 tmp = I915_READ(PF_CTL(crtc->pipe)); 8505 8506 if (tmp & PF_ENABLE) { 8507 pipe_config->pch_pfit.enabled = true; 8508 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 8509 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 8510 8511 /* We currently do not free assignements of panel fitters on 8512 * ivb/hsw (since we don't use the higher upscaling modes which 8513 * differentiates them) so just WARN about this case for now. */ 8514 if (IS_GEN7(dev_priv)) { 8515 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 8516 PF_PIPE_SEL_IVB(crtc->pipe)); 8517 } 8518 } 8519 } 8520 8521 static void 8522 ironlake_get_initial_plane_config(struct intel_crtc *crtc, 8523 struct intel_initial_plane_config *plane_config) 8524 { 8525 struct drm_device *dev = crtc->base.dev; 8526 struct drm_i915_private *dev_priv = to_i915(dev); 8527 u32 val, base, offset; 8528 int pipe = crtc->pipe; 8529 int fourcc, pixel_format; 8530 unsigned int aligned_height; 8531 struct drm_framebuffer *fb; 8532 struct intel_framebuffer *intel_fb; 8533 8534 val = I915_READ(DSPCNTR(pipe)); 8535 if (!(val & DISPLAY_PLANE_ENABLE)) 8536 return; 8537 8538 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8539 if (!intel_fb) { 8540 DRM_DEBUG_KMS("failed to alloc fb\n"); 8541 return; 8542 } 8543 8544 fb = &intel_fb->base; 8545 8546 fb->dev = dev; 8547 8548 if (INTEL_GEN(dev_priv) >= 4) { 8549 if (val & DISPPLANE_TILED) { 8550 plane_config->tiling = I915_TILING_X; 8551 fb->modifier = I915_FORMAT_MOD_X_TILED; 8552 } 8553 } 8554 8555 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8556 fourcc = i9xx_format_to_fourcc(pixel_format); 8557 fb->format = drm_format_info(fourcc); 8558 8559 base = I915_READ(DSPSURF(pipe)) & 0xfffff000; 8560 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8561 offset = I915_READ(DSPOFFSET(pipe)); 8562 } else { 8563 if (plane_config->tiling) 8564 offset = I915_READ(DSPTILEOFF(pipe)); 8565 else 8566 offset = I915_READ(DSPLINOFF(pipe)); 8567 } 8568 plane_config->base = base; 8569 8570 val = I915_READ(PIPESRC(pipe)); 8571 fb->width = ((val >> 16) & 0xfff) + 1; 8572 fb->height = ((val >> 0) & 0xfff) + 1; 8573 8574 val = I915_READ(DSPSTRIDE(pipe)); 8575 fb->pitches[0] = val & 0xffffffc0; 8576 8577 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8578 8579 plane_config->size = fb->pitches[0] * aligned_height; 8580 8581 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8582 pipe_name(pipe), fb->width, fb->height, 8583 fb->format->cpp[0] * 8, base, fb->pitches[0], 8584 plane_config->size); 8585 8586 plane_config->fb = intel_fb; 8587 } 8588 8589 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 8590 struct intel_crtc_state *pipe_config) 8591 { 8592 struct drm_device *dev = crtc->base.dev; 8593 struct drm_i915_private *dev_priv = to_i915(dev); 8594 enum intel_display_power_domain power_domain; 8595 uint32_t tmp; 8596 bool ret; 8597 8598 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8599 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8600 return false; 8601 8602 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8603 pipe_config->shared_dpll = NULL; 8604 8605 ret = false; 8606 tmp = I915_READ(PIPECONF(crtc->pipe)); 8607 if (!(tmp & PIPECONF_ENABLE)) 8608 goto out; 8609 8610 switch (tmp & PIPECONF_BPC_MASK) { 8611 case PIPECONF_6BPC: 8612 pipe_config->pipe_bpp = 18; 8613 break; 8614 case PIPECONF_8BPC: 8615 pipe_config->pipe_bpp = 24; 8616 break; 8617 case PIPECONF_10BPC: 8618 pipe_config->pipe_bpp = 30; 8619 break; 8620 case PIPECONF_12BPC: 8621 pipe_config->pipe_bpp = 36; 8622 break; 8623 default: 8624 break; 8625 } 8626 8627 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 8628 pipe_config->limited_color_range = true; 8629 8630 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 8631 struct intel_shared_dpll *pll; 8632 enum intel_dpll_id pll_id; 8633 8634 pipe_config->has_pch_encoder = true; 8635 8636 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 8637 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 8638 FDI_DP_PORT_WIDTH_SHIFT) + 1; 8639 8640 ironlake_get_fdi_m_n_config(crtc, pipe_config); 8641 8642 if (HAS_PCH_IBX(dev_priv)) { 8643 /* 8644 * The pipe->pch transcoder and pch transcoder->pll 8645 * mapping is fixed. 8646 */ 8647 pll_id = (enum intel_dpll_id) crtc->pipe; 8648 } else { 8649 tmp = I915_READ(PCH_DPLL_SEL); 8650 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 8651 pll_id = DPLL_ID_PCH_PLL_B; 8652 else 8653 pll_id= DPLL_ID_PCH_PLL_A; 8654 } 8655 8656 pipe_config->shared_dpll = 8657 intel_get_shared_dpll_by_id(dev_priv, pll_id); 8658 pll = pipe_config->shared_dpll; 8659 8660 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 8661 &pipe_config->dpll_hw_state)); 8662 8663 tmp = pipe_config->dpll_hw_state.dpll; 8664 pipe_config->pixel_multiplier = 8665 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 8666 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 8667 8668 ironlake_pch_clock_get(crtc, pipe_config); 8669 } else { 8670 pipe_config->pixel_multiplier = 1; 8671 } 8672 8673 intel_get_pipe_timings(crtc, pipe_config); 8674 intel_get_pipe_src_size(crtc, pipe_config); 8675 8676 ironlake_get_pfit_config(crtc, pipe_config); 8677 8678 ret = true; 8679 8680 out: 8681 intel_display_power_put(dev_priv, power_domain); 8682 8683 return ret; 8684 } 8685 8686 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 8687 { 8688 struct drm_device *dev = &dev_priv->drm; 8689 struct intel_crtc *crtc; 8690 8691 for_each_intel_crtc(dev, crtc) 8692 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 8693 pipe_name(crtc->pipe)); 8694 8695 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)), 8696 "Display power well on\n"); 8697 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 8698 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 8699 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 8700 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n"); 8701 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 8702 "CPU PWM1 enabled\n"); 8703 if (IS_HASWELL(dev_priv)) 8704 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 8705 "CPU PWM2 enabled\n"); 8706 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 8707 "PCH PWM1 enabled\n"); 8708 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 8709 "Utility pin enabled\n"); 8710 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 8711 8712 /* 8713 * In theory we can still leave IRQs enabled, as long as only the HPD 8714 * interrupts remain enabled. We used to check for that, but since it's 8715 * gen-specific and since we only disable LCPLL after we fully disable 8716 * the interrupts, the check below should be enough. 8717 */ 8718 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 8719 } 8720 8721 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 8722 { 8723 if (IS_HASWELL(dev_priv)) 8724 return I915_READ(D_COMP_HSW); 8725 else 8726 return I915_READ(D_COMP_BDW); 8727 } 8728 8729 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 8730 { 8731 if (IS_HASWELL(dev_priv)) { 8732 mutex_lock(&dev_priv->pcu_lock); 8733 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 8734 val)) 8735 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 8736 mutex_unlock(&dev_priv->pcu_lock); 8737 } else { 8738 I915_WRITE(D_COMP_BDW, val); 8739 POSTING_READ(D_COMP_BDW); 8740 } 8741 } 8742 8743 /* 8744 * This function implements pieces of two sequences from BSpec: 8745 * - Sequence for display software to disable LCPLL 8746 * - Sequence for display software to allow package C8+ 8747 * The steps implemented here are just the steps that actually touch the LCPLL 8748 * register. Callers should take care of disabling all the display engine 8749 * functions, doing the mode unset, fixing interrupts, etc. 8750 */ 8751 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 8752 bool switch_to_fclk, bool allow_power_down) 8753 { 8754 uint32_t val; 8755 8756 assert_can_disable_lcpll(dev_priv); 8757 8758 val = I915_READ(LCPLL_CTL); 8759 8760 if (switch_to_fclk) { 8761 val |= LCPLL_CD_SOURCE_FCLK; 8762 I915_WRITE(LCPLL_CTL, val); 8763 8764 if (wait_for_us(I915_READ(LCPLL_CTL) & 8765 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 8766 DRM_ERROR("Switching to FCLK failed\n"); 8767 8768 val = I915_READ(LCPLL_CTL); 8769 } 8770 8771 val |= LCPLL_PLL_DISABLE; 8772 I915_WRITE(LCPLL_CTL, val); 8773 POSTING_READ(LCPLL_CTL); 8774 8775 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) 8776 DRM_ERROR("LCPLL still locked\n"); 8777 8778 val = hsw_read_dcomp(dev_priv); 8779 val |= D_COMP_COMP_DISABLE; 8780 hsw_write_dcomp(dev_priv, val); 8781 ndelay(100); 8782 8783 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 8784 1)) 8785 DRM_ERROR("D_COMP RCOMP still in progress\n"); 8786 8787 if (allow_power_down) { 8788 val = I915_READ(LCPLL_CTL); 8789 val |= LCPLL_POWER_DOWN_ALLOW; 8790 I915_WRITE(LCPLL_CTL, val); 8791 POSTING_READ(LCPLL_CTL); 8792 } 8793 } 8794 8795 /* 8796 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 8797 * source. 8798 */ 8799 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 8800 { 8801 uint32_t val; 8802 8803 val = I915_READ(LCPLL_CTL); 8804 8805 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 8806 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 8807 return; 8808 8809 /* 8810 * Make sure we're not on PC8 state before disabling PC8, otherwise 8811 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 8812 */ 8813 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 8814 8815 if (val & LCPLL_POWER_DOWN_ALLOW) { 8816 val &= ~LCPLL_POWER_DOWN_ALLOW; 8817 I915_WRITE(LCPLL_CTL, val); 8818 POSTING_READ(LCPLL_CTL); 8819 } 8820 8821 val = hsw_read_dcomp(dev_priv); 8822 val |= D_COMP_COMP_FORCE; 8823 val &= ~D_COMP_COMP_DISABLE; 8824 hsw_write_dcomp(dev_priv, val); 8825 8826 val = I915_READ(LCPLL_CTL); 8827 val &= ~LCPLL_PLL_DISABLE; 8828 I915_WRITE(LCPLL_CTL, val); 8829 8830 if (intel_wait_for_register(dev_priv, 8831 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 8832 5)) 8833 DRM_ERROR("LCPLL not locked yet\n"); 8834 8835 if (val & LCPLL_CD_SOURCE_FCLK) { 8836 val = I915_READ(LCPLL_CTL); 8837 val &= ~LCPLL_CD_SOURCE_FCLK; 8838 I915_WRITE(LCPLL_CTL, val); 8839 8840 if (wait_for_us((I915_READ(LCPLL_CTL) & 8841 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 8842 DRM_ERROR("Switching back to LCPLL failed\n"); 8843 } 8844 8845 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 8846 intel_update_cdclk(dev_priv); 8847 } 8848 8849 /* 8850 * Package states C8 and deeper are really deep PC states that can only be 8851 * reached when all the devices on the system allow it, so even if the graphics 8852 * device allows PC8+, it doesn't mean the system will actually get to these 8853 * states. Our driver only allows PC8+ when going into runtime PM. 8854 * 8855 * The requirements for PC8+ are that all the outputs are disabled, the power 8856 * well is disabled and most interrupts are disabled, and these are also 8857 * requirements for runtime PM. When these conditions are met, we manually do 8858 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 8859 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 8860 * hang the machine. 8861 * 8862 * When we really reach PC8 or deeper states (not just when we allow it) we lose 8863 * the state of some registers, so when we come back from PC8+ we need to 8864 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 8865 * need to take care of the registers kept by RC6. Notice that this happens even 8866 * if we don't put the device in PCI D3 state (which is what currently happens 8867 * because of the runtime PM support). 8868 * 8869 * For more, read "Display Sequences for Package C8" on the hardware 8870 * documentation. 8871 */ 8872 void hsw_enable_pc8(struct drm_i915_private *dev_priv) 8873 { 8874 uint32_t val; 8875 8876 DRM_DEBUG_KMS("Enabling package C8+\n"); 8877 8878 if (HAS_PCH_LPT_LP(dev_priv)) { 8879 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8880 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 8881 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8882 } 8883 8884 lpt_disable_clkout_dp(dev_priv); 8885 hsw_disable_lcpll(dev_priv, true, true); 8886 } 8887 8888 void hsw_disable_pc8(struct drm_i915_private *dev_priv) 8889 { 8890 uint32_t val; 8891 8892 DRM_DEBUG_KMS("Disabling package C8+\n"); 8893 8894 hsw_restore_lcpll(dev_priv); 8895 lpt_init_pch_refclk(dev_priv); 8896 8897 if (HAS_PCH_LPT_LP(dev_priv)) { 8898 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8899 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 8900 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8901 } 8902 } 8903 8904 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 8905 struct intel_crtc_state *crtc_state) 8906 { 8907 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 8908 struct intel_encoder *encoder = 8909 intel_ddi_get_crtc_new_encoder(crtc_state); 8910 8911 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { 8912 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8913 pipe_name(crtc->pipe)); 8914 return -EINVAL; 8915 } 8916 } 8917 8918 return 0; 8919 } 8920 8921 static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv, 8922 enum port port, 8923 struct intel_crtc_state *pipe_config) 8924 { 8925 enum intel_dpll_id id; 8926 u32 temp; 8927 8928 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port); 8929 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port); 8930 8931 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2)) 8932 return; 8933 8934 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8935 } 8936 8937 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 8938 enum port port, 8939 struct intel_crtc_state *pipe_config) 8940 { 8941 enum intel_dpll_id id; 8942 8943 switch (port) { 8944 case PORT_A: 8945 id = DPLL_ID_SKL_DPLL0; 8946 break; 8947 case PORT_B: 8948 id = DPLL_ID_SKL_DPLL1; 8949 break; 8950 case PORT_C: 8951 id = DPLL_ID_SKL_DPLL2; 8952 break; 8953 default: 8954 DRM_ERROR("Incorrect port type\n"); 8955 return; 8956 } 8957 8958 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8959 } 8960 8961 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 8962 enum port port, 8963 struct intel_crtc_state *pipe_config) 8964 { 8965 enum intel_dpll_id id; 8966 u32 temp; 8967 8968 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 8969 id = temp >> (port * 3 + 1); 8970 8971 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 8972 return; 8973 8974 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8975 } 8976 8977 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 8978 enum port port, 8979 struct intel_crtc_state *pipe_config) 8980 { 8981 enum intel_dpll_id id; 8982 uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 8983 8984 switch (ddi_pll_sel) { 8985 case PORT_CLK_SEL_WRPLL1: 8986 id = DPLL_ID_WRPLL1; 8987 break; 8988 case PORT_CLK_SEL_WRPLL2: 8989 id = DPLL_ID_WRPLL2; 8990 break; 8991 case PORT_CLK_SEL_SPLL: 8992 id = DPLL_ID_SPLL; 8993 break; 8994 case PORT_CLK_SEL_LCPLL_810: 8995 id = DPLL_ID_LCPLL_810; 8996 break; 8997 case PORT_CLK_SEL_LCPLL_1350: 8998 id = DPLL_ID_LCPLL_1350; 8999 break; 9000 case PORT_CLK_SEL_LCPLL_2700: 9001 id = DPLL_ID_LCPLL_2700; 9002 break; 9003 default: 9004 MISSING_CASE(ddi_pll_sel); 9005 /* fall through */ 9006 case PORT_CLK_SEL_NONE: 9007 return; 9008 } 9009 9010 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 9011 } 9012 9013 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 9014 struct intel_crtc_state *pipe_config, 9015 u64 *power_domain_mask) 9016 { 9017 struct drm_device *dev = crtc->base.dev; 9018 struct drm_i915_private *dev_priv = to_i915(dev); 9019 enum intel_display_power_domain power_domain; 9020 u32 tmp; 9021 9022 /* 9023 * The pipe->transcoder mapping is fixed with the exception of the eDP 9024 * transcoder handled below. 9025 */ 9026 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9027 9028 /* 9029 * XXX: Do intel_display_power_get_if_enabled before reading this (for 9030 * consistency and less surprising code; it's in always on power). 9031 */ 9032 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 9033 if (tmp & TRANS_DDI_FUNC_ENABLE) { 9034 enum i915_pipe trans_edp_pipe; 9035 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 9036 default: 9037 WARN(1, "unknown pipe linked to edp transcoder\n"); 9038 case TRANS_DDI_EDP_INPUT_A_ONOFF: 9039 case TRANS_DDI_EDP_INPUT_A_ON: 9040 trans_edp_pipe = PIPE_A; 9041 break; 9042 case TRANS_DDI_EDP_INPUT_B_ONOFF: 9043 trans_edp_pipe = PIPE_B; 9044 break; 9045 case TRANS_DDI_EDP_INPUT_C_ONOFF: 9046 trans_edp_pipe = PIPE_C; 9047 break; 9048 } 9049 9050 if (trans_edp_pipe == crtc->pipe) 9051 pipe_config->cpu_transcoder = TRANSCODER_EDP; 9052 } 9053 9054 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 9055 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9056 return false; 9057 *power_domain_mask |= BIT_ULL(power_domain); 9058 9059 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 9060 9061 return tmp & PIPECONF_ENABLE; 9062 } 9063 9064 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 9065 struct intel_crtc_state *pipe_config, 9066 u64 *power_domain_mask) 9067 { 9068 struct drm_device *dev = crtc->base.dev; 9069 struct drm_i915_private *dev_priv = to_i915(dev); 9070 enum intel_display_power_domain power_domain; 9071 enum port port; 9072 enum transcoder cpu_transcoder; 9073 u32 tmp; 9074 9075 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 9076 if (port == PORT_A) 9077 cpu_transcoder = TRANSCODER_DSI_A; 9078 else 9079 cpu_transcoder = TRANSCODER_DSI_C; 9080 9081 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 9082 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9083 continue; 9084 *power_domain_mask |= BIT_ULL(power_domain); 9085 9086 /* 9087 * The PLL needs to be enabled with a valid divider 9088 * configuration, otherwise accessing DSI registers will hang 9089 * the machine. See BSpec North Display Engine 9090 * registers/MIPI[BXT]. We can break out here early, since we 9091 * need the same DSI PLL to be enabled for both DSI ports. 9092 */ 9093 if (!intel_dsi_pll_is_enabled(dev_priv)) 9094 break; 9095 9096 /* XXX: this works for video mode only */ 9097 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 9098 if (!(tmp & DPI_ENABLE)) 9099 continue; 9100 9101 tmp = I915_READ(MIPI_CTRL(port)); 9102 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 9103 continue; 9104 9105 pipe_config->cpu_transcoder = cpu_transcoder; 9106 break; 9107 } 9108 9109 return transcoder_is_dsi(pipe_config->cpu_transcoder); 9110 } 9111 9112 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 9113 struct intel_crtc_state *pipe_config) 9114 { 9115 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9116 struct intel_shared_dpll *pll; 9117 enum port port; 9118 uint32_t tmp; 9119 9120 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 9121 9122 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9123 9124 if (IS_CANNONLAKE(dev_priv)) 9125 cannonlake_get_ddi_pll(dev_priv, port, pipe_config); 9126 else if (IS_GEN9_BC(dev_priv)) 9127 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9128 else if (IS_GEN9_LP(dev_priv)) 9129 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9130 else 9131 haswell_get_ddi_pll(dev_priv, port, pipe_config); 9132 9133 pll = pipe_config->shared_dpll; 9134 if (pll) { 9135 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 9136 &pipe_config->dpll_hw_state)); 9137 } 9138 9139 /* 9140 * Haswell has only FDI/PCH transcoder A. It is which is connected to 9141 * DDI E. So just check whether this pipe is wired to DDI E and whether 9142 * the PCH transcoder is on. 9143 */ 9144 if (INTEL_GEN(dev_priv) < 9 && 9145 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 9146 pipe_config->has_pch_encoder = true; 9147 9148 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 9149 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9150 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9151 9152 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9153 } 9154 } 9155 9156 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 9157 struct intel_crtc_state *pipe_config) 9158 { 9159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9160 enum intel_display_power_domain power_domain; 9161 u64 power_domain_mask; 9162 bool active; 9163 9164 intel_crtc_init_scalers(crtc, pipe_config); 9165 9166 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9167 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9168 return false; 9169 power_domain_mask = BIT_ULL(power_domain); 9170 9171 pipe_config->shared_dpll = NULL; 9172 9173 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 9174 9175 if (IS_GEN9_LP(dev_priv) && 9176 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { 9177 WARN_ON(active); 9178 active = true; 9179 } 9180 9181 if (!active) 9182 goto out; 9183 9184 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 9185 haswell_get_ddi_port_state(crtc, pipe_config); 9186 intel_get_pipe_timings(crtc, pipe_config); 9187 } 9188 9189 intel_get_pipe_src_size(crtc, pipe_config); 9190 9191 pipe_config->gamma_mode = 9192 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9193 9194 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { 9195 u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); 9196 bool clrspace_yuv = tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV; 9197 9198 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) { 9199 bool blend_mode_420 = tmp & 9200 PIPEMISC_YUV420_MODE_FULL_BLEND; 9201 9202 pipe_config->ycbcr420 = tmp & PIPEMISC_YUV420_ENABLE; 9203 if (pipe_config->ycbcr420 != clrspace_yuv || 9204 pipe_config->ycbcr420 != blend_mode_420) 9205 DRM_DEBUG_KMS("Bad 4:2:0 mode (%08x)\n", tmp); 9206 } else if (clrspace_yuv) { 9207 DRM_DEBUG_KMS("YCbCr 4:2:0 Unsupported\n"); 9208 } 9209 } 9210 9211 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9212 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9213 power_domain_mask |= BIT_ULL(power_domain); 9214 if (INTEL_GEN(dev_priv) >= 9) 9215 skylake_get_pfit_config(crtc, pipe_config); 9216 else 9217 ironlake_get_pfit_config(crtc, pipe_config); 9218 } 9219 9220 if (IS_HASWELL(dev_priv)) 9221 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 9222 (I915_READ(IPS_CTL) & IPS_ENABLE); 9223 9224 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 9225 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 9226 pipe_config->pixel_multiplier = 9227 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 9228 } else { 9229 pipe_config->pixel_multiplier = 1; 9230 } 9231 9232 out: 9233 for_each_power_domain(power_domain, power_domain_mask) 9234 intel_display_power_put(dev_priv, power_domain); 9235 9236 return active; 9237 } 9238 9239 static u32 intel_cursor_base(const struct intel_plane_state *plane_state) 9240 { 9241 struct drm_i915_private *dev_priv = 9242 to_i915(plane_state->base.plane->dev); 9243 const struct drm_framebuffer *fb = plane_state->base.fb; 9244 const struct drm_i915_gem_object *obj = intel_fb_obj(fb); 9245 u32 base; 9246 9247 if (INTEL_INFO(dev_priv)->cursor_needs_physical) 9248 base = obj->phys_handle->busaddr; 9249 else 9250 base = intel_plane_ggtt_offset(plane_state); 9251 9252 base += plane_state->main.offset; 9253 9254 /* ILK+ do this automagically */ 9255 if (HAS_GMCH_DISPLAY(dev_priv) && 9256 plane_state->base.rotation & DRM_MODE_ROTATE_180) 9257 base += (plane_state->base.crtc_h * 9258 plane_state->base.crtc_w - 1) * fb->format->cpp[0]; 9259 9260 return base; 9261 } 9262 9263 static u32 intel_cursor_position(const struct intel_plane_state *plane_state) 9264 { 9265 int x = plane_state->base.crtc_x; 9266 int y = plane_state->base.crtc_y; 9267 u32 pos = 0; 9268 9269 if (x < 0) { 9270 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 9271 x = -x; 9272 } 9273 pos |= x << CURSOR_X_SHIFT; 9274 9275 if (y < 0) { 9276 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 9277 y = -y; 9278 } 9279 pos |= y << CURSOR_Y_SHIFT; 9280 9281 return pos; 9282 } 9283 9284 static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) 9285 { 9286 const struct drm_mode_config *config = 9287 &plane_state->base.plane->dev->mode_config; 9288 int width = plane_state->base.crtc_w; 9289 int height = plane_state->base.crtc_h; 9290 9291 return width > 0 && width <= config->cursor_width && 9292 height > 0 && height <= config->cursor_height; 9293 } 9294 9295 static int intel_check_cursor(struct intel_crtc_state *crtc_state, 9296 struct intel_plane_state *plane_state) 9297 { 9298 const struct drm_framebuffer *fb = plane_state->base.fb; 9299 int src_x, src_y; 9300 u32 offset; 9301 int ret; 9302 9303 ret = drm_plane_helper_check_state(&plane_state->base, 9304 &plane_state->clip, 9305 DRM_PLANE_HELPER_NO_SCALING, 9306 DRM_PLANE_HELPER_NO_SCALING, 9307 true, true); 9308 if (ret) 9309 return ret; 9310 9311 if (!fb) 9312 return 0; 9313 9314 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 9315 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 9316 return -EINVAL; 9317 } 9318 9319 src_x = plane_state->base.src_x >> 16; 9320 src_y = plane_state->base.src_y >> 16; 9321 9322 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 9323 offset = intel_compute_tile_offset(&src_x, &src_y, plane_state, 0); 9324 9325 if (src_x != 0 || src_y != 0) { 9326 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n"); 9327 return -EINVAL; 9328 } 9329 9330 plane_state->main.offset = offset; 9331 9332 return 0; 9333 } 9334 9335 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 9336 const struct intel_plane_state *plane_state) 9337 { 9338 const struct drm_framebuffer *fb = plane_state->base.fb; 9339 9340 return CURSOR_ENABLE | 9341 CURSOR_GAMMA_ENABLE | 9342 CURSOR_FORMAT_ARGB | 9343 CURSOR_STRIDE(fb->pitches[0]); 9344 } 9345 9346 static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) 9347 { 9348 int width = plane_state->base.crtc_w; 9349 9350 /* 9351 * 845g/865g are only limited by the width of their cursors, 9352 * the height is arbitrary up to the precision of the register. 9353 */ 9354 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); 9355 } 9356 9357 static int i845_check_cursor(struct intel_plane *plane, 9358 struct intel_crtc_state *crtc_state, 9359 struct intel_plane_state *plane_state) 9360 { 9361 const struct drm_framebuffer *fb = plane_state->base.fb; 9362 int ret; 9363 9364 ret = intel_check_cursor(crtc_state, plane_state); 9365 if (ret) 9366 return ret; 9367 9368 /* if we want to turn off the cursor ignore width and height */ 9369 if (!fb) 9370 return 0; 9371 9372 /* Check for which cursor types we support */ 9373 if (!i845_cursor_size_ok(plane_state)) { 9374 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 9375 plane_state->base.crtc_w, 9376 plane_state->base.crtc_h); 9377 return -EINVAL; 9378 } 9379 9380 switch (fb->pitches[0]) { 9381 case 256: 9382 case 512: 9383 case 1024: 9384 case 2048: 9385 break; 9386 default: 9387 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n", 9388 fb->pitches[0]); 9389 return -EINVAL; 9390 } 9391 9392 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); 9393 9394 return 0; 9395 } 9396 9397 static void i845_update_cursor(struct intel_plane *plane, 9398 const struct intel_crtc_state *crtc_state, 9399 const struct intel_plane_state *plane_state) 9400 { 9401 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 9402 u32 cntl = 0, base = 0, pos = 0, size = 0; 9403 unsigned long irqflags; 9404 9405 if (plane_state && plane_state->base.visible) { 9406 unsigned int width = plane_state->base.crtc_w; 9407 unsigned int height = plane_state->base.crtc_h; 9408 9409 cntl = plane_state->ctl; 9410 size = (height << 12) | width; 9411 9412 base = intel_cursor_base(plane_state); 9413 pos = intel_cursor_position(plane_state); 9414 } 9415 9416 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 9417 9418 /* On these chipsets we can only modify the base/size/stride 9419 * whilst the cursor is disabled. 9420 */ 9421 if (plane->cursor.base != base || 9422 plane->cursor.size != size || 9423 plane->cursor.cntl != cntl) { 9424 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 9425 I915_WRITE_FW(CURBASE(PIPE_A), base); 9426 I915_WRITE_FW(CURSIZE, size); 9427 I915_WRITE_FW(CURPOS(PIPE_A), pos); 9428 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 9429 9430 plane->cursor.base = base; 9431 plane->cursor.size = size; 9432 plane->cursor.cntl = cntl; 9433 } else { 9434 I915_WRITE_FW(CURPOS(PIPE_A), pos); 9435 } 9436 9437 POSTING_READ_FW(CURCNTR(PIPE_A)); 9438 9439 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 9440 } 9441 9442 static void i845_disable_cursor(struct intel_plane *plane, 9443 struct intel_crtc *crtc) 9444 { 9445 i845_update_cursor(plane, NULL, NULL); 9446 } 9447 9448 static bool i845_cursor_get_hw_state(struct intel_plane *plane) 9449 { 9450 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 9451 enum intel_display_power_domain power_domain; 9452 bool ret; 9453 9454 power_domain = POWER_DOMAIN_PIPE(PIPE_A); 9455 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9456 return false; 9457 9458 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 9459 9460 intel_display_power_put(dev_priv, power_domain); 9461 9462 return ret; 9463 } 9464 9465 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 9466 const struct intel_plane_state *plane_state) 9467 { 9468 struct drm_i915_private *dev_priv = 9469 to_i915(plane_state->base.plane->dev); 9470 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9471 u32 cntl; 9472 9473 cntl = MCURSOR_GAMMA_ENABLE; 9474 9475 if (HAS_DDI(dev_priv)) 9476 cntl |= CURSOR_PIPE_CSC_ENABLE; 9477 9478 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 9479 9480 switch (plane_state->base.crtc_w) { 9481 case 64: 9482 cntl |= CURSOR_MODE_64_ARGB_AX; 9483 break; 9484 case 128: 9485 cntl |= CURSOR_MODE_128_ARGB_AX; 9486 break; 9487 case 256: 9488 cntl |= CURSOR_MODE_256_ARGB_AX; 9489 break; 9490 default: 9491 MISSING_CASE(plane_state->base.crtc_w); 9492 return 0; 9493 } 9494 9495 if (plane_state->base.rotation & DRM_MODE_ROTATE_180) 9496 cntl |= CURSOR_ROTATE_180; 9497 9498 return cntl; 9499 } 9500 9501 static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) 9502 { 9503 struct drm_i915_private *dev_priv = 9504 to_i915(plane_state->base.plane->dev); 9505 int width = plane_state->base.crtc_w; 9506 int height = plane_state->base.crtc_h; 9507 9508 if (!intel_cursor_size_ok(plane_state)) 9509 return false; 9510 9511 /* Cursor width is limited to a few power-of-two sizes */ 9512 switch (width) { 9513 case 256: 9514 case 128: 9515 case 64: 9516 break; 9517 default: 9518 return false; 9519 } 9520 9521 /* 9522 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor 9523 * height from 8 lines up to the cursor width, when the 9524 * cursor is not rotated. Everything else requires square 9525 * cursors. 9526 */ 9527 if (HAS_CUR_FBC(dev_priv) && 9528 plane_state->base.rotation & DRM_MODE_ROTATE_0) { 9529 if (height < 8 || height > width) 9530 return false; 9531 } else { 9532 if (height != width) 9533 return false; 9534 } 9535 9536 return true; 9537 } 9538 9539 static int i9xx_check_cursor(struct intel_plane *plane, 9540 struct intel_crtc_state *crtc_state, 9541 struct intel_plane_state *plane_state) 9542 { 9543 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 9544 const struct drm_framebuffer *fb = plane_state->base.fb; 9545 enum i915_pipe pipe = plane->pipe; 9546 int ret; 9547 9548 ret = intel_check_cursor(crtc_state, plane_state); 9549 if (ret) 9550 return ret; 9551 9552 /* if we want to turn off the cursor ignore width and height */ 9553 if (!fb) 9554 return 0; 9555 9556 /* Check for which cursor types we support */ 9557 if (!i9xx_cursor_size_ok(plane_state)) { 9558 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 9559 plane_state->base.crtc_w, 9560 plane_state->base.crtc_h); 9561 return -EINVAL; 9562 } 9563 9564 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) { 9565 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", 9566 fb->pitches[0], plane_state->base.crtc_w); 9567 return -EINVAL; 9568 } 9569 9570 /* 9571 * There's something wrong with the cursor on CHV pipe C. 9572 * If it straddles the left edge of the screen then 9573 * moving it away from the edge or disabling it often 9574 * results in a pipe underrun, and often that can lead to 9575 * dead pipe (constant underrun reported, and it scans 9576 * out just a solid color). To recover from that, the 9577 * display power well must be turned off and on again. 9578 * Refuse the put the cursor into that compromised position. 9579 */ 9580 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 9581 plane_state->base.visible && plane_state->base.crtc_x < 0) { 9582 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 9583 return -EINVAL; 9584 } 9585 9586 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); 9587 9588 return 0; 9589 } 9590 9591 static void i9xx_update_cursor(struct intel_plane *plane, 9592 const struct intel_crtc_state *crtc_state, 9593 const struct intel_plane_state *plane_state) 9594 { 9595 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 9596 enum i915_pipe pipe = plane->pipe; 9597 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; 9598 unsigned long irqflags; 9599 9600 if (plane_state && plane_state->base.visible) { 9601 cntl = plane_state->ctl; 9602 9603 if (plane_state->base.crtc_h != plane_state->base.crtc_w) 9604 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1); 9605 9606 base = intel_cursor_base(plane_state); 9607 pos = intel_cursor_position(plane_state); 9608 } 9609 9610 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 9611 9612 /* 9613 * On some platforms writing CURCNTR first will also 9614 * cause CURPOS to be armed by the CURBASE write. 9615 * Without the CURCNTR write the CURPOS write would 9616 * arm itself. Thus we always start the full update 9617 * with a CURCNTR write. 9618 * 9619 * On other platforms CURPOS always requires the 9620 * CURBASE write to arm the update. Additonally 9621 * a write to any of the cursor register will cancel 9622 * an already armed cursor update. Thus leaving out 9623 * the CURBASE write after CURPOS could lead to a 9624 * cursor that doesn't appear to move, or even change 9625 * shape. Thus we always write CURBASE. 9626 * 9627 * CURCNTR and CUR_FBC_CTL are always 9628 * armed by the CURBASE write only. 9629 */ 9630 if (plane->cursor.base != base || 9631 plane->cursor.size != fbc_ctl || 9632 plane->cursor.cntl != cntl) { 9633 I915_WRITE_FW(CURCNTR(pipe), cntl); 9634 if (HAS_CUR_FBC(dev_priv)) 9635 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl); 9636 I915_WRITE_FW(CURPOS(pipe), pos); 9637 I915_WRITE_FW(CURBASE(pipe), base); 9638 9639 plane->cursor.base = base; 9640 plane->cursor.size = fbc_ctl; 9641 plane->cursor.cntl = cntl; 9642 } else { 9643 I915_WRITE_FW(CURPOS(pipe), pos); 9644 I915_WRITE_FW(CURBASE(pipe), base); 9645 } 9646 9647 POSTING_READ_FW(CURBASE(pipe)); 9648 9649 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 9650 } 9651 9652 static void i9xx_disable_cursor(struct intel_plane *plane, 9653 struct intel_crtc *crtc) 9654 { 9655 i9xx_update_cursor(plane, NULL, NULL); 9656 } 9657 9658 static bool i9xx_cursor_get_hw_state(struct intel_plane *plane) 9659 { 9660 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 9661 enum intel_display_power_domain power_domain; 9662 enum i915_pipe pipe = plane->pipe; 9663 bool ret; 9664 9665 /* 9666 * Not 100% correct for planes that can move between pipes, 9667 * but that's only the case for gen2-3 which don't have any 9668 * display power wells. 9669 */ 9670 power_domain = POWER_DOMAIN_PIPE(pipe); 9671 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9672 return false; 9673 9674 ret = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 9675 9676 intel_display_power_put(dev_priv, power_domain); 9677 9678 return ret; 9679 } 9680 9681 /* VESA 640x480x72Hz mode to set on the pipe */ 9682 static const struct drm_display_mode load_detect_mode = { 9683 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 9684 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 9685 }; 9686 9687 struct drm_framebuffer * 9688 intel_framebuffer_create(struct drm_i915_gem_object *obj, 9689 struct drm_mode_fb_cmd2 *mode_cmd) 9690 { 9691 struct intel_framebuffer *intel_fb; 9692 int ret; 9693 9694 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9695 if (!intel_fb) 9696 return ERR_PTR(-ENOMEM); 9697 9698 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 9699 if (ret) 9700 goto err; 9701 9702 return &intel_fb->base; 9703 9704 err: 9705 kfree(intel_fb); 9706 return ERR_PTR(ret); 9707 } 9708 9709 static u32 9710 intel_framebuffer_pitch_for_width(int width, int bpp) 9711 { 9712 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 9713 return ALIGN(pitch, 64); 9714 } 9715 9716 static u32 9717 intel_framebuffer_size_for_mode(const struct drm_display_mode *mode, int bpp) 9718 { 9719 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 9720 return PAGE_ALIGN(pitch * mode->vdisplay); 9721 } 9722 9723 static struct drm_framebuffer * 9724 intel_framebuffer_create_for_mode(struct drm_device *dev, 9725 const struct drm_display_mode *mode, 9726 int depth, int bpp) 9727 { 9728 struct drm_framebuffer *fb; 9729 struct drm_i915_gem_object *obj; 9730 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 9731 9732 obj = i915_gem_object_create(to_i915(dev), 9733 intel_framebuffer_size_for_mode(mode, bpp)); 9734 if (IS_ERR(obj)) 9735 return ERR_CAST(obj); 9736 9737 mode_cmd.width = mode->hdisplay; 9738 mode_cmd.height = mode->vdisplay; 9739 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 9740 bpp); 9741 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 9742 9743 fb = intel_framebuffer_create(obj, &mode_cmd); 9744 if (IS_ERR(fb)) 9745 i915_gem_object_put(obj); 9746 9747 return fb; 9748 } 9749 9750 static struct drm_framebuffer * 9751 mode_fits_in_fbdev(struct drm_device *dev, 9752 const struct drm_display_mode *mode) 9753 { 9754 #ifdef CONFIG_DRM_FBDEV_EMULATION 9755 struct drm_i915_private *dev_priv = to_i915(dev); 9756 struct drm_i915_gem_object *obj; 9757 struct drm_framebuffer *fb; 9758 9759 if (!dev_priv->fbdev) 9760 return NULL; 9761 9762 if (!dev_priv->fbdev->fb) 9763 return NULL; 9764 9765 obj = dev_priv->fbdev->fb->obj; 9766 BUG_ON(!obj); 9767 9768 fb = &dev_priv->fbdev->fb->base; 9769 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 9770 fb->format->cpp[0] * 8)) 9771 return NULL; 9772 9773 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 9774 return NULL; 9775 9776 drm_framebuffer_get(fb); 9777 return fb; 9778 #else 9779 return NULL; 9780 #endif 9781 } 9782 9783 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, 9784 struct drm_crtc *crtc, 9785 const struct drm_display_mode *mode, 9786 struct drm_framebuffer *fb, 9787 int x, int y) 9788 { 9789 struct drm_plane_state *plane_state; 9790 int hdisplay, vdisplay; 9791 int ret; 9792 9793 plane_state = drm_atomic_get_plane_state(state, crtc->primary); 9794 if (IS_ERR(plane_state)) 9795 return PTR_ERR(plane_state); 9796 9797 if (mode) 9798 drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); 9799 else 9800 hdisplay = vdisplay = 0; 9801 9802 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL); 9803 if (ret) 9804 return ret; 9805 drm_atomic_set_fb_for_plane(plane_state, fb); 9806 plane_state->crtc_x = 0; 9807 plane_state->crtc_y = 0; 9808 plane_state->crtc_w = hdisplay; 9809 plane_state->crtc_h = vdisplay; 9810 plane_state->src_x = x << 16; 9811 plane_state->src_y = y << 16; 9812 plane_state->src_w = hdisplay << 16; 9813 plane_state->src_h = vdisplay << 16; 9814 9815 return 0; 9816 } 9817 9818 int intel_get_load_detect_pipe(struct drm_connector *connector, 9819 const struct drm_display_mode *mode, 9820 struct intel_load_detect_pipe *old, 9821 struct drm_modeset_acquire_ctx *ctx) 9822 { 9823 struct intel_crtc *intel_crtc; 9824 struct intel_encoder *intel_encoder = 9825 intel_attached_encoder(connector); 9826 struct drm_crtc *possible_crtc; 9827 struct drm_encoder *encoder = &intel_encoder->base; 9828 struct drm_crtc *crtc = NULL; 9829 struct drm_device *dev = encoder->dev; 9830 struct drm_i915_private *dev_priv = to_i915(dev); 9831 struct drm_framebuffer *fb; 9832 struct drm_mode_config *config = &dev->mode_config; 9833 struct drm_atomic_state *state = NULL, *restore_state = NULL; 9834 struct drm_connector_state *connector_state; 9835 struct intel_crtc_state *crtc_state; 9836 int ret, i = -1; 9837 9838 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 9839 connector->base.id, connector->name, 9840 encoder->base.id, encoder->name); 9841 9842 old->restore_state = NULL; 9843 9844 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 9845 9846 /* 9847 * Algorithm gets a little messy: 9848 * 9849 * - if the connector already has an assigned crtc, use it (but make 9850 * sure it's on first) 9851 * 9852 * - try to find the first unused crtc that can drive this connector, 9853 * and use that if we find one 9854 */ 9855 9856 /* See if we already have a CRTC for this connector */ 9857 if (connector->state->crtc) { 9858 crtc = connector->state->crtc; 9859 9860 ret = drm_modeset_lock(&crtc->mutex, ctx); 9861 if (ret) 9862 goto fail; 9863 9864 /* Make sure the crtc and connector are running */ 9865 goto found; 9866 } 9867 9868 /* Find an unused one (if possible) */ 9869 for_each_crtc(dev, possible_crtc) { 9870 i++; 9871 if (!(encoder->possible_crtcs & (1 << i))) 9872 continue; 9873 9874 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 9875 if (ret) 9876 goto fail; 9877 9878 if (possible_crtc->state->enable) { 9879 drm_modeset_unlock(&possible_crtc->mutex); 9880 continue; 9881 } 9882 9883 crtc = possible_crtc; 9884 break; 9885 } 9886 9887 /* 9888 * If we didn't find an unused CRTC, don't use any. 9889 */ 9890 if (!crtc) { 9891 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 9892 ret = -ENODEV; 9893 goto fail; 9894 } 9895 9896 found: 9897 intel_crtc = to_intel_crtc(crtc); 9898 9899 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 9900 if (ret) 9901 goto fail; 9902 9903 state = drm_atomic_state_alloc(dev); 9904 restore_state = drm_atomic_state_alloc(dev); 9905 if (!state || !restore_state) { 9906 ret = -ENOMEM; 9907 goto fail; 9908 } 9909 9910 state->acquire_ctx = ctx; 9911 restore_state->acquire_ctx = ctx; 9912 9913 connector_state = drm_atomic_get_connector_state(state, connector); 9914 if (IS_ERR(connector_state)) { 9915 ret = PTR_ERR(connector_state); 9916 goto fail; 9917 } 9918 9919 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 9920 if (ret) 9921 goto fail; 9922 9923 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 9924 if (IS_ERR(crtc_state)) { 9925 ret = PTR_ERR(crtc_state); 9926 goto fail; 9927 } 9928 9929 crtc_state->base.active = crtc_state->base.enable = true; 9930 9931 if (!mode) 9932 mode = &load_detect_mode; 9933 9934 /* We need a framebuffer large enough to accommodate all accesses 9935 * that the plane may generate whilst we perform load detection. 9936 * We can not rely on the fbcon either being present (we get called 9937 * during its initialisation to detect all boot displays, or it may 9938 * not even exist) or that it is large enough to satisfy the 9939 * requested mode. 9940 */ 9941 fb = mode_fits_in_fbdev(dev, mode); 9942 if (fb == NULL) { 9943 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 9944 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 9945 } else 9946 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 9947 if (IS_ERR(fb)) { 9948 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 9949 ret = PTR_ERR(fb); 9950 goto fail; 9951 } 9952 9953 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 9954 drm_framebuffer_put(fb); 9955 if (ret) 9956 goto fail; 9957 9958 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 9959 if (ret) 9960 goto fail; 9961 9962 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 9963 if (!ret) 9964 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 9965 if (!ret) 9966 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary)); 9967 if (ret) { 9968 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 9969 goto fail; 9970 } 9971 9972 ret = drm_atomic_commit(state); 9973 if (ret) { 9974 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 9975 goto fail; 9976 } 9977 9978 old->restore_state = restore_state; 9979 drm_atomic_state_put(state); 9980 9981 /* let the connector get through one full cycle before testing */ 9982 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 9983 return true; 9984 9985 fail: 9986 if (state) { 9987 drm_atomic_state_put(state); 9988 state = NULL; 9989 } 9990 if (restore_state) { 9991 drm_atomic_state_put(restore_state); 9992 restore_state = NULL; 9993 } 9994 9995 if (ret == -EDEADLK) 9996 return ret; 9997 9998 return false; 9999 } 10000 10001 void intel_release_load_detect_pipe(struct drm_connector *connector, 10002 struct intel_load_detect_pipe *old, 10003 struct drm_modeset_acquire_ctx *ctx) 10004 { 10005 struct intel_encoder *intel_encoder = 10006 intel_attached_encoder(connector); 10007 struct drm_encoder *encoder = &intel_encoder->base; 10008 struct drm_atomic_state *state = old->restore_state; 10009 int ret; 10010 10011 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10012 connector->base.id, connector->name, 10013 encoder->base.id, encoder->name); 10014 10015 if (!state) 10016 return; 10017 10018 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 10019 if (ret) 10020 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 10021 drm_atomic_state_put(state); 10022 } 10023 10024 static int i9xx_pll_refclk(struct drm_device *dev, 10025 const struct intel_crtc_state *pipe_config) 10026 { 10027 struct drm_i915_private *dev_priv = to_i915(dev); 10028 u32 dpll = pipe_config->dpll_hw_state.dpll; 10029 10030 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10031 return dev_priv->vbt.lvds_ssc_freq; 10032 else if (HAS_PCH_SPLIT(dev_priv)) 10033 return 120000; 10034 else if (!IS_GEN2(dev_priv)) 10035 return 96000; 10036 else 10037 return 48000; 10038 } 10039 10040 /* Returns the clock of the currently programmed mode of the given pipe. */ 10041 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 10042 struct intel_crtc_state *pipe_config) 10043 { 10044 struct drm_device *dev = crtc->base.dev; 10045 struct drm_i915_private *dev_priv = to_i915(dev); 10046 int pipe = pipe_config->cpu_transcoder; 10047 u32 dpll = pipe_config->dpll_hw_state.dpll; 10048 u32 fp; 10049 struct dpll clock; 10050 int port_clock; 10051 int refclk = i9xx_pll_refclk(dev, pipe_config); 10052 10053 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 10054 fp = pipe_config->dpll_hw_state.fp0; 10055 else 10056 fp = pipe_config->dpll_hw_state.fp1; 10057 10058 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 10059 if (IS_PINEVIEW(dev_priv)) { 10060 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 10061 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 10062 } else { 10063 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 10064 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 10065 } 10066 10067 if (!IS_GEN2(dev_priv)) { 10068 if (IS_PINEVIEW(dev_priv)) 10069 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 10070 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 10071 else 10072 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 10073 DPLL_FPA01_P1_POST_DIV_SHIFT); 10074 10075 switch (dpll & DPLL_MODE_MASK) { 10076 case DPLLB_MODE_DAC_SERIAL: 10077 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 10078 5 : 10; 10079 break; 10080 case DPLLB_MODE_LVDS: 10081 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 10082 7 : 14; 10083 break; 10084 default: 10085 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 10086 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 10087 return; 10088 } 10089 10090 if (IS_PINEVIEW(dev_priv)) 10091 port_clock = pnv_calc_dpll_params(refclk, &clock); 10092 else 10093 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10094 } else { 10095 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 10096 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 10097 10098 if (is_lvds) { 10099 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 10100 DPLL_FPA01_P1_POST_DIV_SHIFT); 10101 10102 if (lvds & LVDS_CLKB_POWER_UP) 10103 clock.p2 = 7; 10104 else 10105 clock.p2 = 14; 10106 } else { 10107 if (dpll & PLL_P1_DIVIDE_BY_TWO) 10108 clock.p1 = 2; 10109 else { 10110 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 10111 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 10112 } 10113 if (dpll & PLL_P2_DIVIDE_BY_4) 10114 clock.p2 = 4; 10115 else 10116 clock.p2 = 2; 10117 } 10118 10119 port_clock = i9xx_calc_dpll_params(refclk, &clock); 10120 } 10121 10122 /* 10123 * This value includes pixel_multiplier. We will use 10124 * port_clock to compute adjusted_mode.crtc_clock in the 10125 * encoder's get_config() function. 10126 */ 10127 pipe_config->port_clock = port_clock; 10128 } 10129 10130 int intel_dotclock_calculate(int link_freq, 10131 const struct intel_link_m_n *m_n) 10132 { 10133 /* 10134 * The calculation for the data clock is: 10135 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 10136 * But we want to avoid losing precison if possible, so: 10137 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 10138 * 10139 * and the link clock is simpler: 10140 * link_clock = (m * link_clock) / n 10141 */ 10142 10143 if (!m_n->link_n) 10144 return 0; 10145 10146 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n); 10147 } 10148 10149 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 10150 struct intel_crtc_state *pipe_config) 10151 { 10152 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10153 10154 /* read out port_clock from the DPLL */ 10155 i9xx_crtc_clock_get(crtc, pipe_config); 10156 10157 /* 10158 * In case there is an active pipe without active ports, 10159 * we may need some idea for the dotclock anyway. 10160 * Calculate one based on the FDI configuration. 10161 */ 10162 pipe_config->base.adjusted_mode.crtc_clock = 10163 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 10164 &pipe_config->fdi_m_n); 10165 } 10166 10167 /* Returns the currently programmed mode of the given encoder. */ 10168 struct drm_display_mode * 10169 intel_encoder_current_mode(struct intel_encoder *encoder) 10170 { 10171 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 10172 struct intel_crtc_state *crtc_state; 10173 struct drm_display_mode *mode; 10174 struct intel_crtc *crtc; 10175 enum i915_pipe pipe; 10176 10177 if (!encoder->get_hw_state(encoder, &pipe)) 10178 return NULL; 10179 10180 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 10181 10182 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10183 if (!mode) 10184 return NULL; 10185 10186 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 10187 if (!crtc_state) { 10188 kfree(mode); 10189 return NULL; 10190 } 10191 10192 crtc_state->base.crtc = &crtc->base; 10193 10194 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) { 10195 kfree(crtc_state); 10196 kfree(mode); 10197 return NULL; 10198 } 10199 10200 encoder->get_config(encoder, crtc_state); 10201 10202 intel_mode_from_pipe_config(mode, crtc_state); 10203 10204 kfree(crtc_state); 10205 10206 return mode; 10207 } 10208 10209 static void intel_crtc_destroy(struct drm_crtc *crtc) 10210 { 10211 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10212 10213 drm_crtc_cleanup(crtc); 10214 kfree(intel_crtc); 10215 } 10216 10217 /** 10218 * intel_wm_need_update - Check whether watermarks need updating 10219 * @plane: drm plane 10220 * @state: new plane state 10221 * 10222 * Check current plane state versus the new one to determine whether 10223 * watermarks need to be recalculated. 10224 * 10225 * Returns true or false. 10226 */ 10227 static bool intel_wm_need_update(struct drm_plane *plane, 10228 struct drm_plane_state *state) 10229 { 10230 struct intel_plane_state *new = to_intel_plane_state(state); 10231 struct intel_plane_state *cur = to_intel_plane_state(plane->state); 10232 10233 /* Update watermarks on tiling or size changes. */ 10234 if (new->base.visible != cur->base.visible) 10235 return true; 10236 10237 if (!cur->base.fb || !new->base.fb) 10238 return false; 10239 10240 if (cur->base.fb->modifier != new->base.fb->modifier || 10241 cur->base.rotation != new->base.rotation || 10242 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 10243 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 10244 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 10245 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 10246 return true; 10247 10248 return false; 10249 } 10250 10251 static bool needs_scaling(const struct intel_plane_state *state) 10252 { 10253 int src_w = drm_rect_width(&state->base.src) >> 16; 10254 int src_h = drm_rect_height(&state->base.src) >> 16; 10255 int dst_w = drm_rect_width(&state->base.dst); 10256 int dst_h = drm_rect_height(&state->base.dst); 10257 10258 return (src_w != dst_w || src_h != dst_h); 10259 } 10260 10261 int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, 10262 struct drm_crtc_state *crtc_state, 10263 const struct intel_plane_state *old_plane_state, 10264 struct drm_plane_state *plane_state) 10265 { 10266 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); 10267 struct drm_crtc *crtc = crtc_state->crtc; 10268 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10269 struct intel_plane *plane = to_intel_plane(plane_state->plane); 10270 struct drm_device *dev = crtc->dev; 10271 struct drm_i915_private *dev_priv = to_i915(dev); 10272 bool mode_changed = needs_modeset(crtc_state); 10273 bool was_crtc_enabled = old_crtc_state->base.active; 10274 bool is_crtc_enabled = crtc_state->active; 10275 bool turn_off, turn_on, visible, was_visible; 10276 struct drm_framebuffer *fb = plane_state->fb; 10277 int ret; 10278 10279 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 10280 ret = skl_update_scaler_plane( 10281 to_intel_crtc_state(crtc_state), 10282 to_intel_plane_state(plane_state)); 10283 if (ret) 10284 return ret; 10285 } 10286 10287 was_visible = old_plane_state->base.visible; 10288 visible = plane_state->visible; 10289 10290 if (!was_crtc_enabled && WARN_ON(was_visible)) 10291 was_visible = false; 10292 10293 /* 10294 * Visibility is calculated as if the crtc was on, but 10295 * after scaler setup everything depends on it being off 10296 * when the crtc isn't active. 10297 * 10298 * FIXME this is wrong for watermarks. Watermarks should also 10299 * be computed as if the pipe would be active. Perhaps move 10300 * per-plane wm computation to the .check_plane() hook, and 10301 * only combine the results from all planes in the current place? 10302 */ 10303 if (!is_crtc_enabled) { 10304 plane_state->visible = visible = false; 10305 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id); 10306 } 10307 10308 if (!was_visible && !visible) 10309 return 0; 10310 10311 if (fb != old_plane_state->base.fb) 10312 pipe_config->fb_changed = true; 10313 10314 turn_off = was_visible && (!visible || mode_changed); 10315 turn_on = visible && (!was_visible || mode_changed); 10316 10317 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", 10318 intel_crtc->base.base.id, intel_crtc->base.name, 10319 plane->base.base.id, plane->base.name, 10320 fb ? fb->base.id : -1); 10321 10322 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 10323 plane->base.base.id, plane->base.name, 10324 was_visible, visible, 10325 turn_off, turn_on, mode_changed); 10326 10327 if (turn_on) { 10328 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 10329 pipe_config->update_wm_pre = true; 10330 10331 /* must disable cxsr around plane enable/disable */ 10332 if (plane->id != PLANE_CURSOR) 10333 pipe_config->disable_cxsr = true; 10334 } else if (turn_off) { 10335 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 10336 pipe_config->update_wm_post = true; 10337 10338 /* must disable cxsr around plane enable/disable */ 10339 if (plane->id != PLANE_CURSOR) 10340 pipe_config->disable_cxsr = true; 10341 } else if (intel_wm_need_update(&plane->base, plane_state)) { 10342 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 10343 /* FIXME bollocks */ 10344 pipe_config->update_wm_pre = true; 10345 pipe_config->update_wm_post = true; 10346 } 10347 } 10348 10349 if (visible || was_visible) 10350 pipe_config->fb_bits |= plane->frontbuffer_bit; 10351 10352 /* 10353 * WaCxSRDisabledForSpriteScaling:ivb 10354 * 10355 * cstate->update_wm was already set above, so this flag will 10356 * take effect when we commit and program watermarks. 10357 */ 10358 if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) && 10359 needs_scaling(to_intel_plane_state(plane_state)) && 10360 !needs_scaling(old_plane_state)) 10361 pipe_config->disable_lp_wm = true; 10362 10363 return 0; 10364 } 10365 10366 static bool encoders_cloneable(const struct intel_encoder *a, 10367 const struct intel_encoder *b) 10368 { 10369 /* masks could be asymmetric, so check both ways */ 10370 return a == b || (a->cloneable & (1 << b->type) && 10371 b->cloneable & (1 << a->type)); 10372 } 10373 10374 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 10375 struct intel_crtc *crtc, 10376 struct intel_encoder *encoder) 10377 { 10378 struct intel_encoder *source_encoder; 10379 struct drm_connector *connector; 10380 struct drm_connector_state *connector_state; 10381 int i; 10382 10383 for_each_new_connector_in_state(state, connector, connector_state, i) { 10384 if (connector_state->crtc != &crtc->base) 10385 continue; 10386 10387 source_encoder = 10388 to_intel_encoder(connector_state->best_encoder); 10389 if (!encoders_cloneable(encoder, source_encoder)) 10390 return false; 10391 } 10392 10393 return true; 10394 } 10395 10396 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 10397 struct drm_crtc_state *crtc_state) 10398 { 10399 struct drm_device *dev = crtc->dev; 10400 struct drm_i915_private *dev_priv = to_i915(dev); 10401 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10402 struct intel_crtc_state *pipe_config = 10403 to_intel_crtc_state(crtc_state); 10404 struct drm_atomic_state *state = crtc_state->state; 10405 int ret; 10406 bool mode_changed = needs_modeset(crtc_state); 10407 10408 if (mode_changed && !crtc_state->active) 10409 pipe_config->update_wm_post = true; 10410 10411 if (mode_changed && crtc_state->enable && 10412 dev_priv->display.crtc_compute_clock && 10413 !WARN_ON(pipe_config->shared_dpll)) { 10414 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 10415 pipe_config); 10416 if (ret) 10417 return ret; 10418 } 10419 10420 if (crtc_state->color_mgmt_changed) { 10421 ret = intel_color_check(crtc, crtc_state); 10422 if (ret) 10423 return ret; 10424 10425 /* 10426 * Changing color management on Intel hardware is 10427 * handled as part of planes update. 10428 */ 10429 crtc_state->planes_changed = true; 10430 } 10431 10432 ret = 0; 10433 if (dev_priv->display.compute_pipe_wm) { 10434 ret = dev_priv->display.compute_pipe_wm(pipe_config); 10435 if (ret) { 10436 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 10437 return ret; 10438 } 10439 } 10440 10441 if (dev_priv->display.compute_intermediate_wm && 10442 !to_intel_atomic_state(state)->skip_intermediate_wm) { 10443 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 10444 return 0; 10445 10446 /* 10447 * Calculate 'intermediate' watermarks that satisfy both the 10448 * old state and the new state. We can program these 10449 * immediately. 10450 */ 10451 ret = dev_priv->display.compute_intermediate_wm(dev, 10452 intel_crtc, 10453 pipe_config); 10454 if (ret) { 10455 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 10456 return ret; 10457 } 10458 } else if (dev_priv->display.compute_intermediate_wm) { 10459 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 10460 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; 10461 } 10462 10463 if (INTEL_GEN(dev_priv) >= 9) { 10464 if (mode_changed) 10465 ret = skl_update_scaler_crtc(pipe_config); 10466 10467 if (!ret) 10468 ret = skl_check_pipe_max_pixel_rate(intel_crtc, 10469 pipe_config); 10470 if (!ret) 10471 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, 10472 pipe_config); 10473 } 10474 10475 return ret; 10476 } 10477 10478 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 10479 .atomic_begin = intel_begin_crtc_commit, 10480 .atomic_flush = intel_finish_crtc_commit, 10481 .atomic_check = intel_crtc_atomic_check, 10482 }; 10483 10484 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 10485 { 10486 struct intel_connector *connector; 10487 struct drm_connector_list_iter conn_iter; 10488 10489 drm_connector_list_iter_begin(dev, &conn_iter); 10490 for_each_intel_connector_iter(connector, &conn_iter) { 10491 if (connector->base.state->crtc) 10492 drm_connector_unreference(&connector->base); 10493 10494 if (connector->base.encoder) { 10495 connector->base.state->best_encoder = 10496 connector->base.encoder; 10497 connector->base.state->crtc = 10498 connector->base.encoder->crtc; 10499 10500 drm_connector_reference(&connector->base); 10501 } else { 10502 connector->base.state->best_encoder = NULL; 10503 connector->base.state->crtc = NULL; 10504 } 10505 } 10506 drm_connector_list_iter_end(&conn_iter); 10507 } 10508 10509 static void 10510 connected_sink_compute_bpp(struct intel_connector *connector, 10511 struct intel_crtc_state *pipe_config) 10512 { 10513 const struct drm_display_info *info = &connector->base.display_info; 10514 int bpp = pipe_config->pipe_bpp; 10515 10516 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", 10517 connector->base.base.id, 10518 connector->base.name); 10519 10520 /* Don't use an invalid EDID bpc value */ 10521 if (info->bpc != 0 && info->bpc * 3 < bpp) { 10522 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 10523 bpp, info->bpc * 3); 10524 pipe_config->pipe_bpp = info->bpc * 3; 10525 } 10526 10527 /* Clamp bpp to 8 on screens without EDID 1.4 */ 10528 if (info->bpc == 0 && bpp > 24) { 10529 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 10530 bpp); 10531 pipe_config->pipe_bpp = 24; 10532 } 10533 } 10534 10535 static int 10536 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 10537 struct intel_crtc_state *pipe_config) 10538 { 10539 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10540 struct drm_atomic_state *state; 10541 struct drm_connector *connector; 10542 struct drm_connector_state *connector_state; 10543 int bpp, i; 10544 10545 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 10546 IS_CHERRYVIEW(dev_priv))) 10547 bpp = 10*3; 10548 else if (INTEL_GEN(dev_priv) >= 5) 10549 bpp = 12*3; 10550 else 10551 bpp = 8*3; 10552 10553 10554 pipe_config->pipe_bpp = bpp; 10555 10556 state = pipe_config->base.state; 10557 10558 /* Clamp display bpp to EDID value */ 10559 for_each_new_connector_in_state(state, connector, connector_state, i) { 10560 if (connector_state->crtc != &crtc->base) 10561 continue; 10562 10563 connected_sink_compute_bpp(to_intel_connector(connector), 10564 pipe_config); 10565 } 10566 10567 return bpp; 10568 } 10569 10570 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 10571 { 10572 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 10573 "type: 0x%x flags: 0x%x\n", 10574 mode->crtc_clock, 10575 mode->crtc_hdisplay, mode->crtc_hsync_start, 10576 mode->crtc_hsync_end, mode->crtc_htotal, 10577 mode->crtc_vdisplay, mode->crtc_vsync_start, 10578 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 10579 } 10580 10581 static inline void 10582 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, 10583 unsigned int lane_count, struct intel_link_m_n *m_n) 10584 { 10585 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 10586 id, lane_count, 10587 m_n->gmch_m, m_n->gmch_n, 10588 m_n->link_m, m_n->link_n, m_n->tu); 10589 } 10590 10591 #define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x 10592 10593 static const char * const output_type_str[] = { 10594 OUTPUT_TYPE(UNUSED), 10595 OUTPUT_TYPE(ANALOG), 10596 OUTPUT_TYPE(DVO), 10597 OUTPUT_TYPE(SDVO), 10598 OUTPUT_TYPE(LVDS), 10599 OUTPUT_TYPE(TVOUT), 10600 OUTPUT_TYPE(HDMI), 10601 OUTPUT_TYPE(DP), 10602 OUTPUT_TYPE(EDP), 10603 OUTPUT_TYPE(DSI), 10604 OUTPUT_TYPE(UNKNOWN), 10605 OUTPUT_TYPE(DP_MST), 10606 }; 10607 10608 #undef OUTPUT_TYPE 10609 10610 static void snprintf_output_types(char *buf, size_t len, 10611 unsigned int output_types) 10612 { 10613 char *str = buf; 10614 int i; 10615 10616 str[0] = '\0'; 10617 10618 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) { 10619 int r; 10620 10621 if ((output_types & BIT(i)) == 0) 10622 continue; 10623 10624 r = snprintf(str, len, "%s%s", 10625 str != buf ? "," : "", output_type_str[i]); 10626 if (r >= len) 10627 break; 10628 str += r; 10629 len -= r; 10630 10631 output_types &= ~BIT(i); 10632 } 10633 10634 WARN_ON_ONCE(output_types != 0); 10635 } 10636 10637 static void intel_dump_pipe_config(struct intel_crtc *crtc, 10638 struct intel_crtc_state *pipe_config, 10639 const char *context) 10640 { 10641 struct drm_device *dev = crtc->base.dev; 10642 struct drm_i915_private *dev_priv = to_i915(dev); 10643 struct drm_plane *plane; 10644 struct intel_plane *intel_plane; 10645 struct intel_plane_state *state; 10646 struct drm_framebuffer *fb; 10647 char buf[64]; 10648 10649 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", 10650 crtc->base.base.id, crtc->base.name, context); 10651 10652 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types); 10653 DRM_DEBUG_KMS("output_types: %s (0x%x)\n", 10654 buf, pipe_config->output_types); 10655 10656 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 10657 transcoder_name(pipe_config->cpu_transcoder), 10658 pipe_config->pipe_bpp, pipe_config->dither); 10659 10660 if (pipe_config->has_pch_encoder) 10661 intel_dump_m_n_config(pipe_config, "fdi", 10662 pipe_config->fdi_lanes, 10663 &pipe_config->fdi_m_n); 10664 10665 if (pipe_config->ycbcr420) 10666 DRM_DEBUG_KMS("YCbCr 4:2:0 output enabled\n"); 10667 10668 if (intel_crtc_has_dp_encoder(pipe_config)) { 10669 intel_dump_m_n_config(pipe_config, "dp m_n", 10670 pipe_config->lane_count, &pipe_config->dp_m_n); 10671 if (pipe_config->has_drrs) 10672 intel_dump_m_n_config(pipe_config, "dp m2_n2", 10673 pipe_config->lane_count, 10674 &pipe_config->dp_m2_n2); 10675 } 10676 10677 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", 10678 pipe_config->has_audio, pipe_config->has_infoframe); 10679 10680 DRM_DEBUG_KMS("requested mode:\n"); 10681 drm_mode_debug_printmodeline(&pipe_config->base.mode); 10682 DRM_DEBUG_KMS("adjusted mode:\n"); 10683 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 10684 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 10685 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 10686 pipe_config->port_clock, 10687 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 10688 pipe_config->pixel_rate); 10689 10690 if (INTEL_GEN(dev_priv) >= 9) 10691 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 10692 crtc->num_scalers, 10693 pipe_config->scaler_state.scaler_users, 10694 pipe_config->scaler_state.scaler_id); 10695 10696 if (HAS_GMCH_DISPLAY(dev_priv)) 10697 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 10698 pipe_config->gmch_pfit.control, 10699 pipe_config->gmch_pfit.pgm_ratios, 10700 pipe_config->gmch_pfit.lvds_border_bits); 10701 else 10702 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", 10703 pipe_config->pch_pfit.pos, 10704 pipe_config->pch_pfit.size, 10705 enableddisabled(pipe_config->pch_pfit.enabled)); 10706 10707 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 10708 pipe_config->ips_enabled, pipe_config->double_wide); 10709 10710 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 10711 10712 DRM_DEBUG_KMS("planes on this crtc\n"); 10713 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 10714 struct drm_format_name_buf format_name; 10715 intel_plane = to_intel_plane(plane); 10716 if (intel_plane->pipe != crtc->pipe) 10717 continue; 10718 10719 state = to_intel_plane_state(plane->state); 10720 fb = state->base.fb; 10721 if (!fb) { 10722 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", 10723 plane->base.id, plane->name, state->scaler_id); 10724 continue; 10725 } 10726 10727 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", 10728 plane->base.id, plane->name, 10729 fb->base.id, fb->width, fb->height, 10730 drm_get_format_name(fb->format->format, &format_name)); 10731 if (INTEL_GEN(dev_priv) >= 9) 10732 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", 10733 state->scaler_id, 10734 state->base.src.x1 >> 16, 10735 state->base.src.y1 >> 16, 10736 drm_rect_width(&state->base.src) >> 16, 10737 drm_rect_height(&state->base.src) >> 16, 10738 state->base.dst.x1, state->base.dst.y1, 10739 drm_rect_width(&state->base.dst), 10740 drm_rect_height(&state->base.dst)); 10741 } 10742 } 10743 10744 static bool check_digital_port_conflicts(struct drm_atomic_state *state) 10745 { 10746 struct drm_device *dev = state->dev; 10747 struct drm_connector *connector; 10748 struct drm_connector_list_iter conn_iter; 10749 unsigned int used_ports = 0; 10750 unsigned int used_mst_ports = 0; 10751 10752 /* 10753 * Walk the connector list instead of the encoder 10754 * list to detect the problem on ddi platforms 10755 * where there's just one encoder per digital port. 10756 */ 10757 drm_connector_list_iter_begin(dev, &conn_iter); 10758 drm_for_each_connector_iter(connector, &conn_iter) { 10759 struct drm_connector_state *connector_state; 10760 struct intel_encoder *encoder; 10761 10762 connector_state = drm_atomic_get_existing_connector_state(state, connector); 10763 if (!connector_state) 10764 connector_state = connector->state; 10765 10766 if (!connector_state->best_encoder) 10767 continue; 10768 10769 encoder = to_intel_encoder(connector_state->best_encoder); 10770 10771 WARN_ON(!connector_state->crtc); 10772 10773 switch (encoder->type) { 10774 unsigned int port_mask; 10775 case INTEL_OUTPUT_UNKNOWN: 10776 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 10777 break; 10778 case INTEL_OUTPUT_DP: 10779 case INTEL_OUTPUT_HDMI: 10780 case INTEL_OUTPUT_EDP: 10781 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 10782 10783 /* the same port mustn't appear more than once */ 10784 if (used_ports & port_mask) 10785 return false; 10786 10787 used_ports |= port_mask; 10788 break; 10789 case INTEL_OUTPUT_DP_MST: 10790 used_mst_ports |= 10791 1 << enc_to_mst(&encoder->base)->primary->port; 10792 break; 10793 default: 10794 break; 10795 } 10796 } 10797 drm_connector_list_iter_end(&conn_iter); 10798 10799 /* can't mix MST and SST/HDMI on the same port */ 10800 if (used_ports & used_mst_ports) 10801 return false; 10802 10803 return true; 10804 } 10805 10806 static void 10807 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 10808 { 10809 struct drm_i915_private *dev_priv = 10810 to_i915(crtc_state->base.crtc->dev); 10811 struct intel_crtc_scaler_state scaler_state; 10812 struct intel_dpll_hw_state dpll_hw_state; 10813 struct intel_shared_dpll *shared_dpll; 10814 struct intel_crtc_wm_state wm_state; 10815 bool force_thru, ips_force_disable; 10816 10817 /* FIXME: before the switch to atomic started, a new pipe_config was 10818 * kzalloc'd. Code that depends on any field being zero should be 10819 * fixed, so that the crtc_state can be safely duplicated. For now, 10820 * only fields that are know to not cause problems are preserved. */ 10821 10822 scaler_state = crtc_state->scaler_state; 10823 shared_dpll = crtc_state->shared_dpll; 10824 dpll_hw_state = crtc_state->dpll_hw_state; 10825 force_thru = crtc_state->pch_pfit.force_thru; 10826 ips_force_disable = crtc_state->ips_force_disable; 10827 if (IS_G4X(dev_priv) || 10828 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 10829 wm_state = crtc_state->wm; 10830 10831 /* Keep base drm_crtc_state intact, only clear our extended struct */ 10832 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); 10833 memset(&crtc_state->base + 1, 0, 10834 sizeof(*crtc_state) - sizeof(crtc_state->base)); 10835 10836 crtc_state->scaler_state = scaler_state; 10837 crtc_state->shared_dpll = shared_dpll; 10838 crtc_state->dpll_hw_state = dpll_hw_state; 10839 crtc_state->pch_pfit.force_thru = force_thru; 10840 crtc_state->ips_force_disable = ips_force_disable; 10841 if (IS_G4X(dev_priv) || 10842 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 10843 crtc_state->wm = wm_state; 10844 } 10845 10846 static int 10847 intel_modeset_pipe_config(struct drm_crtc *crtc, 10848 struct intel_crtc_state *pipe_config) 10849 { 10850 struct drm_atomic_state *state = pipe_config->base.state; 10851 struct intel_encoder *encoder; 10852 struct drm_connector *connector; 10853 struct drm_connector_state *connector_state; 10854 int base_bpp, ret = -EINVAL; 10855 int i; 10856 bool retry = true; 10857 10858 clear_intel_crtc_state(pipe_config); 10859 10860 pipe_config->cpu_transcoder = 10861 (enum transcoder) to_intel_crtc(crtc)->pipe; 10862 10863 /* 10864 * Sanitize sync polarity flags based on requested ones. If neither 10865 * positive or negative polarity is requested, treat this as meaning 10866 * negative polarity. 10867 */ 10868 if (!(pipe_config->base.adjusted_mode.flags & 10869 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 10870 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 10871 10872 if (!(pipe_config->base.adjusted_mode.flags & 10873 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 10874 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 10875 10876 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 10877 pipe_config); 10878 if (base_bpp < 0) 10879 goto fail; 10880 10881 /* 10882 * Determine the real pipe dimensions. Note that stereo modes can 10883 * increase the actual pipe size due to the frame doubling and 10884 * insertion of additional space for blanks between the frame. This 10885 * is stored in the crtc timings. We use the requested mode to do this 10886 * computation to clearly distinguish it from the adjusted mode, which 10887 * can be changed by the connectors in the below retry loop. 10888 */ 10889 drm_mode_get_hv_timing(&pipe_config->base.mode, 10890 &pipe_config->pipe_src_w, 10891 &pipe_config->pipe_src_h); 10892 10893 for_each_new_connector_in_state(state, connector, connector_state, i) { 10894 if (connector_state->crtc != crtc) 10895 continue; 10896 10897 encoder = to_intel_encoder(connector_state->best_encoder); 10898 10899 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 10900 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 10901 goto fail; 10902 } 10903 10904 /* 10905 * Determine output_types before calling the .compute_config() 10906 * hooks so that the hooks can use this information safely. 10907 */ 10908 pipe_config->output_types |= 1 << encoder->type; 10909 } 10910 10911 encoder_retry: 10912 /* Ensure the port clock defaults are reset when retrying. */ 10913 pipe_config->port_clock = 0; 10914 pipe_config->pixel_multiplier = 1; 10915 10916 /* Fill in default crtc timings, allow encoders to overwrite them. */ 10917 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 10918 CRTC_STEREO_DOUBLE); 10919 10920 /* Pass our mode to the connectors and the CRTC to give them a chance to 10921 * adjust it according to limitations or connector properties, and also 10922 * a chance to reject the mode entirely. 10923 */ 10924 for_each_new_connector_in_state(state, connector, connector_state, i) { 10925 if (connector_state->crtc != crtc) 10926 continue; 10927 10928 encoder = to_intel_encoder(connector_state->best_encoder); 10929 10930 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { 10931 DRM_DEBUG_KMS("Encoder config failure\n"); 10932 goto fail; 10933 } 10934 } 10935 10936 /* Set default port clock if not overwritten by the encoder. Needs to be 10937 * done afterwards in case the encoder adjusts the mode. */ 10938 if (!pipe_config->port_clock) 10939 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 10940 * pipe_config->pixel_multiplier; 10941 10942 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 10943 if (ret < 0) { 10944 DRM_DEBUG_KMS("CRTC fixup failed\n"); 10945 goto fail; 10946 } 10947 10948 if (ret == RETRY) { 10949 if (WARN(!retry, "loop in pipe configuration computation\n")) { 10950 ret = -EINVAL; 10951 goto fail; 10952 } 10953 10954 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 10955 retry = false; 10956 goto encoder_retry; 10957 } 10958 10959 /* Dithering seems to not pass-through bits correctly when it should, so 10960 * only enable it on 6bpc panels and when its not a compliance 10961 * test requesting 6bpc video pattern. 10962 */ 10963 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 10964 !pipe_config->dither_force_disable; 10965 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 10966 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 10967 10968 fail: 10969 return ret; 10970 } 10971 10972 static void 10973 intel_modeset_update_crtc_state(struct drm_atomic_state *state) 10974 { 10975 struct drm_crtc *crtc; 10976 struct drm_crtc_state *new_crtc_state; 10977 int i; 10978 10979 /* Double check state. */ 10980 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 10981 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state); 10982 10983 /* 10984 * Update legacy state to satisfy fbc code. This can 10985 * be removed when fbc uses the atomic state. 10986 */ 10987 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 10988 struct drm_plane_state *plane_state = crtc->primary->state; 10989 10990 crtc->primary->fb = plane_state->fb; 10991 crtc->x = plane_state->src_x >> 16; 10992 crtc->y = plane_state->src_y >> 16; 10993 } 10994 } 10995 } 10996 10997 static bool intel_fuzzy_clock_check(int clock1, int clock2) 10998 { 10999 int diff; 11000 11001 if (clock1 == clock2) 11002 return true; 11003 11004 if (!clock1 || !clock2) 11005 return false; 11006 11007 diff = abs(clock1 - clock2); 11008 11009 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 11010 return true; 11011 11012 return false; 11013 } 11014 11015 static bool 11016 intel_compare_m_n(unsigned int m, unsigned int n, 11017 unsigned int m2, unsigned int n2, 11018 bool exact) 11019 { 11020 if (m == m2 && n == n2) 11021 return true; 11022 11023 if (exact || !m || !n || !m2 || !n2) 11024 return false; 11025 11026 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 11027 11028 if (n > n2) { 11029 while (n > n2) { 11030 m2 <<= 1; 11031 n2 <<= 1; 11032 } 11033 } else if (n < n2) { 11034 while (n < n2) { 11035 m <<= 1; 11036 n <<= 1; 11037 } 11038 } 11039 11040 if (n != n2) 11041 return false; 11042 11043 return intel_fuzzy_clock_check(m, m2); 11044 } 11045 11046 static bool 11047 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 11048 struct intel_link_m_n *m2_n2, 11049 bool adjust) 11050 { 11051 if (m_n->tu == m2_n2->tu && 11052 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 11053 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 11054 intel_compare_m_n(m_n->link_m, m_n->link_n, 11055 m2_n2->link_m, m2_n2->link_n, !adjust)) { 11056 if (adjust) 11057 *m2_n2 = *m_n; 11058 11059 return true; 11060 } 11061 11062 return false; 11063 } 11064 11065 static void __printf(3, 4) 11066 pipe_config_err(bool adjust, const char *name, const char *format, ...) 11067 { 11068 char *level; 11069 unsigned int category; 11070 struct va_format vaf; 11071 va_list args; 11072 11073 if (adjust) { 11074 level = KERN_DEBUG; 11075 category = DRM_UT_KMS; 11076 } else { 11077 level = KERN_ERR; 11078 category = DRM_UT_NONE; 11079 } 11080 11081 va_start(args, format); 11082 vaf.fmt = format; 11083 vaf.va = &args; 11084 11085 drm_printk(level, category, "mismatch in %s %pV", name, &vaf); 11086 11087 va_end(args); 11088 } 11089 11090 static bool 11091 intel_pipe_config_compare(struct drm_i915_private *dev_priv, 11092 struct intel_crtc_state *current_config, 11093 struct intel_crtc_state *pipe_config, 11094 bool adjust) 11095 { 11096 bool ret = true; 11097 11098 #define PIPE_CONF_CHECK_X(name) \ 11099 if (current_config->name != pipe_config->name) { \ 11100 pipe_config_err(adjust, __stringify(name), \ 11101 "(expected 0x%08x, found 0x%08x)\n", \ 11102 current_config->name, \ 11103 pipe_config->name); \ 11104 ret = false; \ 11105 } 11106 11107 #define PIPE_CONF_CHECK_I(name) \ 11108 if (current_config->name != pipe_config->name) { \ 11109 pipe_config_err(adjust, __stringify(name), \ 11110 "(expected %i, found %i)\n", \ 11111 current_config->name, \ 11112 pipe_config->name); \ 11113 ret = false; \ 11114 } 11115 11116 #define PIPE_CONF_CHECK_P(name) \ 11117 if (current_config->name != pipe_config->name) { \ 11118 pipe_config_err(adjust, __stringify(name), \ 11119 "(expected %p, found %p)\n", \ 11120 current_config->name, \ 11121 pipe_config->name); \ 11122 ret = false; \ 11123 } 11124 11125 #define PIPE_CONF_CHECK_M_N(name) \ 11126 if (!intel_compare_link_m_n(¤t_config->name, \ 11127 &pipe_config->name,\ 11128 adjust)) { \ 11129 pipe_config_err(adjust, __stringify(name), \ 11130 "(expected tu %i gmch %i/%i link %i/%i, " \ 11131 "found tu %i, gmch %i/%i link %i/%i)\n", \ 11132 current_config->name.tu, \ 11133 current_config->name.gmch_m, \ 11134 current_config->name.gmch_n, \ 11135 current_config->name.link_m, \ 11136 current_config->name.link_n, \ 11137 pipe_config->name.tu, \ 11138 pipe_config->name.gmch_m, \ 11139 pipe_config->name.gmch_n, \ 11140 pipe_config->name.link_m, \ 11141 pipe_config->name.link_n); \ 11142 ret = false; \ 11143 } 11144 11145 /* This is required for BDW+ where there is only one set of registers for 11146 * switching between high and low RR. 11147 * This macro can be used whenever a comparison has to be made between one 11148 * hw state and multiple sw state variables. 11149 */ 11150 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 11151 if (!intel_compare_link_m_n(¤t_config->name, \ 11152 &pipe_config->name, adjust) && \ 11153 !intel_compare_link_m_n(¤t_config->alt_name, \ 11154 &pipe_config->name, adjust)) { \ 11155 pipe_config_err(adjust, __stringify(name), \ 11156 "(expected tu %i gmch %i/%i link %i/%i, " \ 11157 "or tu %i gmch %i/%i link %i/%i, " \ 11158 "found tu %i, gmch %i/%i link %i/%i)\n", \ 11159 current_config->name.tu, \ 11160 current_config->name.gmch_m, \ 11161 current_config->name.gmch_n, \ 11162 current_config->name.link_m, \ 11163 current_config->name.link_n, \ 11164 current_config->alt_name.tu, \ 11165 current_config->alt_name.gmch_m, \ 11166 current_config->alt_name.gmch_n, \ 11167 current_config->alt_name.link_m, \ 11168 current_config->alt_name.link_n, \ 11169 pipe_config->name.tu, \ 11170 pipe_config->name.gmch_m, \ 11171 pipe_config->name.gmch_n, \ 11172 pipe_config->name.link_m, \ 11173 pipe_config->name.link_n); \ 11174 ret = false; \ 11175 } 11176 11177 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 11178 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 11179 pipe_config_err(adjust, __stringify(name), \ 11180 "(%x) (expected %i, found %i)\n", \ 11181 (mask), \ 11182 current_config->name & (mask), \ 11183 pipe_config->name & (mask)); \ 11184 ret = false; \ 11185 } 11186 11187 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 11188 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 11189 pipe_config_err(adjust, __stringify(name), \ 11190 "(expected %i, found %i)\n", \ 11191 current_config->name, \ 11192 pipe_config->name); \ 11193 ret = false; \ 11194 } 11195 11196 #define PIPE_CONF_QUIRK(quirk) \ 11197 ((current_config->quirks | pipe_config->quirks) & (quirk)) 11198 11199 PIPE_CONF_CHECK_I(cpu_transcoder); 11200 11201 PIPE_CONF_CHECK_I(has_pch_encoder); 11202 PIPE_CONF_CHECK_I(fdi_lanes); 11203 PIPE_CONF_CHECK_M_N(fdi_m_n); 11204 11205 PIPE_CONF_CHECK_I(lane_count); 11206 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 11207 11208 if (INTEL_GEN(dev_priv) < 8) { 11209 PIPE_CONF_CHECK_M_N(dp_m_n); 11210 11211 if (current_config->has_drrs) 11212 PIPE_CONF_CHECK_M_N(dp_m2_n2); 11213 } else 11214 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 11215 11216 PIPE_CONF_CHECK_X(output_types); 11217 11218 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 11219 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 11220 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 11221 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 11222 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 11223 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 11224 11225 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 11226 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 11227 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 11228 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 11229 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 11230 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 11231 11232 PIPE_CONF_CHECK_I(pixel_multiplier); 11233 PIPE_CONF_CHECK_I(has_hdmi_sink); 11234 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 11235 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11236 PIPE_CONF_CHECK_I(limited_color_range); 11237 11238 PIPE_CONF_CHECK_I(hdmi_scrambling); 11239 PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio); 11240 PIPE_CONF_CHECK_I(has_infoframe); 11241 PIPE_CONF_CHECK_I(ycbcr420); 11242 11243 PIPE_CONF_CHECK_I(has_audio); 11244 11245 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11246 DRM_MODE_FLAG_INTERLACE); 11247 11248 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 11249 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11250 DRM_MODE_FLAG_PHSYNC); 11251 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11252 DRM_MODE_FLAG_NHSYNC); 11253 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11254 DRM_MODE_FLAG_PVSYNC); 11255 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11256 DRM_MODE_FLAG_NVSYNC); 11257 } 11258 11259 PIPE_CONF_CHECK_X(gmch_pfit.control); 11260 /* pfit ratios are autocomputed by the hw on gen4+ */ 11261 if (INTEL_GEN(dev_priv) < 4) 11262 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 11263 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 11264 11265 if (!adjust) { 11266 PIPE_CONF_CHECK_I(pipe_src_w); 11267 PIPE_CONF_CHECK_I(pipe_src_h); 11268 11269 PIPE_CONF_CHECK_I(pch_pfit.enabled); 11270 if (current_config->pch_pfit.enabled) { 11271 PIPE_CONF_CHECK_X(pch_pfit.pos); 11272 PIPE_CONF_CHECK_X(pch_pfit.size); 11273 } 11274 11275 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 11276 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 11277 } 11278 11279 /* BDW+ don't expose a synchronous way to read the state */ 11280 if (IS_HASWELL(dev_priv)) 11281 PIPE_CONF_CHECK_I(ips_enabled); 11282 11283 PIPE_CONF_CHECK_I(double_wide); 11284 11285 PIPE_CONF_CHECK_P(shared_dpll); 11286 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 11287 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 11288 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 11289 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 11290 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 11291 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 11292 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 11293 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 11294 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 11295 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0); 11296 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0); 11297 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4); 11298 PIPE_CONF_CHECK_X(dpll_hw_state.pll0); 11299 PIPE_CONF_CHECK_X(dpll_hw_state.pll1); 11300 PIPE_CONF_CHECK_X(dpll_hw_state.pll2); 11301 PIPE_CONF_CHECK_X(dpll_hw_state.pll3); 11302 PIPE_CONF_CHECK_X(dpll_hw_state.pll6); 11303 PIPE_CONF_CHECK_X(dpll_hw_state.pll8); 11304 PIPE_CONF_CHECK_X(dpll_hw_state.pll9); 11305 PIPE_CONF_CHECK_X(dpll_hw_state.pll10); 11306 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12); 11307 11308 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 11309 PIPE_CONF_CHECK_X(dsi_pll.div); 11310 11311 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 11312 PIPE_CONF_CHECK_I(pipe_bpp); 11313 11314 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 11315 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 11316 11317 #undef PIPE_CONF_CHECK_X 11318 #undef PIPE_CONF_CHECK_I 11319 #undef PIPE_CONF_CHECK_P 11320 #undef PIPE_CONF_CHECK_FLAGS 11321 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 11322 #undef PIPE_CONF_QUIRK 11323 11324 return ret; 11325 } 11326 11327 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 11328 const struct intel_crtc_state *pipe_config) 11329 { 11330 if (pipe_config->has_pch_encoder) { 11331 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11332 &pipe_config->fdi_m_n); 11333 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 11334 11335 /* 11336 * FDI already provided one idea for the dotclock. 11337 * Yell if the encoder disagrees. 11338 */ 11339 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 11340 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 11341 fdi_dotclock, dotclock); 11342 } 11343 } 11344 11345 static void verify_wm_state(struct drm_crtc *crtc, 11346 struct drm_crtc_state *new_state) 11347 { 11348 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 11349 struct skl_ddb_allocation hw_ddb, *sw_ddb; 11350 struct skl_pipe_wm hw_wm, *sw_wm; 11351 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 11352 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 11353 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11354 const enum i915_pipe pipe = intel_crtc->pipe; 11355 int plane, level, max_level = ilk_wm_max_level(dev_priv); 11356 11357 if (INTEL_GEN(dev_priv) < 9 || !new_state->active) 11358 return; 11359 11360 skl_pipe_wm_get_hw_state(crtc, &hw_wm); 11361 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; 11362 11363 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 11364 sw_ddb = &dev_priv->wm.skl_hw.ddb; 11365 11366 /* planes */ 11367 for_each_universal_plane(dev_priv, pipe, plane) { 11368 hw_plane_wm = &hw_wm.planes[plane]; 11369 sw_plane_wm = &sw_wm->planes[plane]; 11370 11371 /* Watermarks */ 11372 for (level = 0; level <= max_level; level++) { 11373 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 11374 &sw_plane_wm->wm[level])) 11375 continue; 11376 11377 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11378 pipe_name(pipe), plane + 1, level, 11379 sw_plane_wm->wm[level].plane_en, 11380 sw_plane_wm->wm[level].plane_res_b, 11381 sw_plane_wm->wm[level].plane_res_l, 11382 hw_plane_wm->wm[level].plane_en, 11383 hw_plane_wm->wm[level].plane_res_b, 11384 hw_plane_wm->wm[level].plane_res_l); 11385 } 11386 11387 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 11388 &sw_plane_wm->trans_wm)) { 11389 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11390 pipe_name(pipe), plane + 1, 11391 sw_plane_wm->trans_wm.plane_en, 11392 sw_plane_wm->trans_wm.plane_res_b, 11393 sw_plane_wm->trans_wm.plane_res_l, 11394 hw_plane_wm->trans_wm.plane_en, 11395 hw_plane_wm->trans_wm.plane_res_b, 11396 hw_plane_wm->trans_wm.plane_res_l); 11397 } 11398 11399 /* DDB */ 11400 hw_ddb_entry = &hw_ddb.plane[pipe][plane]; 11401 sw_ddb_entry = &sw_ddb->plane[pipe][plane]; 11402 11403 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 11404 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 11405 pipe_name(pipe), plane + 1, 11406 sw_ddb_entry->start, sw_ddb_entry->end, 11407 hw_ddb_entry->start, hw_ddb_entry->end); 11408 } 11409 } 11410 11411 /* 11412 * cursor 11413 * If the cursor plane isn't active, we may not have updated it's ddb 11414 * allocation. In that case since the ddb allocation will be updated 11415 * once the plane becomes visible, we can skip this check 11416 */ 11417 if (1) { 11418 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR]; 11419 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 11420 11421 /* Watermarks */ 11422 for (level = 0; level <= max_level; level++) { 11423 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 11424 &sw_plane_wm->wm[level])) 11425 continue; 11426 11427 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11428 pipe_name(pipe), level, 11429 sw_plane_wm->wm[level].plane_en, 11430 sw_plane_wm->wm[level].plane_res_b, 11431 sw_plane_wm->wm[level].plane_res_l, 11432 hw_plane_wm->wm[level].plane_en, 11433 hw_plane_wm->wm[level].plane_res_b, 11434 hw_plane_wm->wm[level].plane_res_l); 11435 } 11436 11437 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 11438 &sw_plane_wm->trans_wm)) { 11439 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11440 pipe_name(pipe), 11441 sw_plane_wm->trans_wm.plane_en, 11442 sw_plane_wm->trans_wm.plane_res_b, 11443 sw_plane_wm->trans_wm.plane_res_l, 11444 hw_plane_wm->trans_wm.plane_en, 11445 hw_plane_wm->trans_wm.plane_res_b, 11446 hw_plane_wm->trans_wm.plane_res_l); 11447 } 11448 11449 /* DDB */ 11450 hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; 11451 sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; 11452 11453 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 11454 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 11455 pipe_name(pipe), 11456 sw_ddb_entry->start, sw_ddb_entry->end, 11457 hw_ddb_entry->start, hw_ddb_entry->end); 11458 } 11459 } 11460 } 11461 11462 static void 11463 verify_connector_state(struct drm_device *dev, 11464 struct drm_atomic_state *state, 11465 struct drm_crtc *crtc) 11466 { 11467 struct drm_connector *connector; 11468 struct drm_connector_state *new_conn_state; 11469 int i; 11470 11471 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 11472 struct drm_encoder *encoder = connector->encoder; 11473 struct drm_crtc_state *crtc_state = NULL; 11474 11475 if (new_conn_state->crtc != crtc) 11476 continue; 11477 11478 if (crtc) 11479 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); 11480 11481 intel_connector_verify_state(crtc_state, new_conn_state); 11482 11483 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 11484 "connector's atomic encoder doesn't match legacy encoder\n"); 11485 } 11486 } 11487 11488 static void 11489 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) 11490 { 11491 struct intel_encoder *encoder; 11492 struct drm_connector *connector; 11493 struct drm_connector_state *old_conn_state, *new_conn_state; 11494 int i; 11495 11496 for_each_intel_encoder(dev, encoder) { 11497 bool enabled = false, found = false; 11498 enum i915_pipe pipe; 11499 11500 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 11501 encoder->base.base.id, 11502 encoder->base.name); 11503 11504 for_each_oldnew_connector_in_state(state, connector, old_conn_state, 11505 new_conn_state, i) { 11506 if (old_conn_state->best_encoder == &encoder->base) 11507 found = true; 11508 11509 if (new_conn_state->best_encoder != &encoder->base) 11510 continue; 11511 found = enabled = true; 11512 11513 I915_STATE_WARN(new_conn_state->crtc != 11514 encoder->base.crtc, 11515 "connector's crtc doesn't match encoder crtc\n"); 11516 } 11517 11518 if (!found) 11519 continue; 11520 11521 I915_STATE_WARN(!!encoder->base.crtc != enabled, 11522 "encoder's enabled state mismatch " 11523 "(expected %i, found %i)\n", 11524 !!encoder->base.crtc, enabled); 11525 11526 if (!encoder->base.crtc) { 11527 bool active; 11528 11529 active = encoder->get_hw_state(encoder, &pipe); 11530 I915_STATE_WARN(active, 11531 "encoder detached but still enabled on pipe %c.\n", 11532 pipe_name(pipe)); 11533 } 11534 } 11535 } 11536 11537 static void 11538 verify_crtc_state(struct drm_crtc *crtc, 11539 struct drm_crtc_state *old_crtc_state, 11540 struct drm_crtc_state *new_crtc_state) 11541 { 11542 struct drm_device *dev = crtc->dev; 11543 struct drm_i915_private *dev_priv = to_i915(dev); 11544 struct intel_encoder *encoder; 11545 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11546 struct intel_crtc_state *pipe_config, *sw_config; 11547 struct drm_atomic_state *old_state; 11548 bool active; 11549 11550 old_state = old_crtc_state->state; 11551 __drm_atomic_helper_crtc_destroy_state(old_crtc_state); 11552 pipe_config = to_intel_crtc_state(old_crtc_state); 11553 memset(pipe_config, 0, sizeof(*pipe_config)); 11554 pipe_config->base.crtc = crtc; 11555 pipe_config->base.state = old_state; 11556 11557 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 11558 11559 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 11560 11561 /* we keep both pipes enabled on 830 */ 11562 if (IS_I830(dev_priv)) 11563 active = new_crtc_state->active; 11564 11565 I915_STATE_WARN(new_crtc_state->active != active, 11566 "crtc active state doesn't match with hw state " 11567 "(expected %i, found %i)\n", new_crtc_state->active, active); 11568 11569 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, 11570 "transitional active state does not match atomic hw state " 11571 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); 11572 11573 for_each_encoder_on_crtc(dev, crtc, encoder) { 11574 enum i915_pipe pipe; 11575 11576 active = encoder->get_hw_state(encoder, &pipe); 11577 I915_STATE_WARN(active != new_crtc_state->active, 11578 "[ENCODER:%i] active %i with crtc active %i\n", 11579 encoder->base.base.id, active, new_crtc_state->active); 11580 11581 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 11582 "Encoder connected to wrong pipe %c\n", 11583 pipe_name(pipe)); 11584 11585 if (active) { 11586 pipe_config->output_types |= 1 << encoder->type; 11587 encoder->get_config(encoder, pipe_config); 11588 } 11589 } 11590 11591 intel_crtc_compute_pixel_rate(pipe_config); 11592 11593 if (!new_crtc_state->active) 11594 return; 11595 11596 intel_pipe_config_sanity_check(dev_priv, pipe_config); 11597 11598 sw_config = to_intel_crtc_state(new_crtc_state); 11599 if (!intel_pipe_config_compare(dev_priv, sw_config, 11600 pipe_config, false)) { 11601 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 11602 intel_dump_pipe_config(intel_crtc, pipe_config, 11603 "[hw state]"); 11604 intel_dump_pipe_config(intel_crtc, sw_config, 11605 "[sw state]"); 11606 } 11607 } 11608 11609 static void 11610 verify_single_dpll_state(struct drm_i915_private *dev_priv, 11611 struct intel_shared_dpll *pll, 11612 struct drm_crtc *crtc, 11613 struct drm_crtc_state *new_state) 11614 { 11615 struct intel_dpll_hw_state dpll_hw_state; 11616 unsigned crtc_mask; 11617 bool active; 11618 11619 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 11620 11621 DRM_DEBUG_KMS("%s\n", pll->name); 11622 11623 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); 11624 11625 if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) { 11626 I915_STATE_WARN(!pll->on && pll->active_mask, 11627 "pll in active use but not on in sw tracking\n"); 11628 I915_STATE_WARN(pll->on && !pll->active_mask, 11629 "pll is on but not used by any active crtc\n"); 11630 I915_STATE_WARN(pll->on != active, 11631 "pll on state mismatch (expected %i, found %i)\n", 11632 pll->on, active); 11633 } 11634 11635 if (!crtc) { 11636 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 11637 "more active pll users than references: %x vs %x\n", 11638 pll->active_mask, pll->state.crtc_mask); 11639 11640 return; 11641 } 11642 11643 crtc_mask = 1 << drm_crtc_index(crtc); 11644 11645 if (new_state->active) 11646 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 11647 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 11648 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 11649 else 11650 I915_STATE_WARN(pll->active_mask & crtc_mask, 11651 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 11652 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 11653 11654 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 11655 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 11656 crtc_mask, pll->state.crtc_mask); 11657 11658 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 11659 &dpll_hw_state, 11660 sizeof(dpll_hw_state)), 11661 "pll hw state mismatch\n"); 11662 } 11663 11664 static void 11665 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, 11666 struct drm_crtc_state *old_crtc_state, 11667 struct drm_crtc_state *new_crtc_state) 11668 { 11669 struct drm_i915_private *dev_priv = to_i915(dev); 11670 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); 11671 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); 11672 11673 if (new_state->shared_dpll) 11674 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state); 11675 11676 if (old_state->shared_dpll && 11677 old_state->shared_dpll != new_state->shared_dpll) { 11678 unsigned crtc_mask = 1 << drm_crtc_index(crtc); 11679 struct intel_shared_dpll *pll = old_state->shared_dpll; 11680 11681 I915_STATE_WARN(pll->active_mask & crtc_mask, 11682 "pll active mismatch (didn't expect pipe %c in active mask)\n", 11683 pipe_name(drm_crtc_index(crtc))); 11684 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 11685 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 11686 pipe_name(drm_crtc_index(crtc))); 11687 } 11688 } 11689 11690 static void 11691 intel_modeset_verify_crtc(struct drm_crtc *crtc, 11692 struct drm_atomic_state *state, 11693 struct drm_crtc_state *old_state, 11694 struct drm_crtc_state *new_state) 11695 { 11696 if (!needs_modeset(new_state) && 11697 !to_intel_crtc_state(new_state)->update_pipe) 11698 return; 11699 11700 verify_wm_state(crtc, new_state); 11701 verify_connector_state(crtc->dev, state, crtc); 11702 verify_crtc_state(crtc, old_state, new_state); 11703 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); 11704 } 11705 11706 static void 11707 verify_disabled_dpll_state(struct drm_device *dev) 11708 { 11709 struct drm_i915_private *dev_priv = to_i915(dev); 11710 int i; 11711 11712 for (i = 0; i < dev_priv->num_shared_dpll; i++) 11713 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 11714 } 11715 11716 static void 11717 intel_modeset_verify_disabled(struct drm_device *dev, 11718 struct drm_atomic_state *state) 11719 { 11720 verify_encoder_state(dev, state); 11721 verify_connector_state(dev, state, NULL); 11722 verify_disabled_dpll_state(dev); 11723 } 11724 11725 static void update_scanline_offset(struct intel_crtc *crtc) 11726 { 11727 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11728 11729 /* 11730 * The scanline counter increments at the leading edge of hsync. 11731 * 11732 * On most platforms it starts counting from vtotal-1 on the 11733 * first active line. That means the scanline counter value is 11734 * always one less than what we would expect. Ie. just after 11735 * start of vblank, which also occurs at start of hsync (on the 11736 * last active line), the scanline counter will read vblank_start-1. 11737 * 11738 * On gen2 the scanline counter starts counting from 1 instead 11739 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 11740 * to keep the value positive), instead of adding one. 11741 * 11742 * On HSW+ the behaviour of the scanline counter depends on the output 11743 * type. For DP ports it behaves like most other platforms, but on HDMI 11744 * there's an extra 1 line difference. So we need to add two instead of 11745 * one to the value. 11746 * 11747 * On VLV/CHV DSI the scanline counter would appear to increment 11748 * approx. 1/3 of a scanline before start of vblank. Unfortunately 11749 * that means we can't tell whether we're in vblank or not while 11750 * we're on that particular line. We must still set scanline_offset 11751 * to 1 so that the vblank timestamps come out correct when we query 11752 * the scanline counter from within the vblank interrupt handler. 11753 * However if queried just before the start of vblank we'll get an 11754 * answer that's slightly in the future. 11755 */ 11756 if (IS_GEN2(dev_priv)) { 11757 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 11758 int vtotal; 11759 11760 vtotal = adjusted_mode->crtc_vtotal; 11761 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 11762 vtotal /= 2; 11763 11764 crtc->scanline_offset = vtotal - 1; 11765 } else if (HAS_DDI(dev_priv) && 11766 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { 11767 crtc->scanline_offset = 2; 11768 } else 11769 crtc->scanline_offset = 1; 11770 } 11771 11772 static void intel_modeset_clear_plls(struct drm_atomic_state *state) 11773 { 11774 struct drm_device *dev = state->dev; 11775 struct drm_i915_private *dev_priv = to_i915(dev); 11776 struct drm_crtc *crtc; 11777 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11778 int i; 11779 11780 if (!dev_priv->display.crtc_compute_clock) 11781 return; 11782 11783 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11784 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11785 struct intel_shared_dpll *old_dpll = 11786 to_intel_crtc_state(old_crtc_state)->shared_dpll; 11787 11788 if (!needs_modeset(new_crtc_state)) 11789 continue; 11790 11791 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL; 11792 11793 if (!old_dpll) 11794 continue; 11795 11796 intel_release_shared_dpll(old_dpll, intel_crtc, state); 11797 } 11798 } 11799 11800 /* 11801 * This implements the workaround described in the "notes" section of the mode 11802 * set sequence documentation. When going from no pipes or single pipe to 11803 * multiple pipes, and planes are enabled after the pipe, we need to wait at 11804 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 11805 */ 11806 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) 11807 { 11808 struct drm_crtc_state *crtc_state; 11809 struct intel_crtc *intel_crtc; 11810 struct drm_crtc *crtc; 11811 struct intel_crtc_state *first_crtc_state = NULL; 11812 struct intel_crtc_state *other_crtc_state = NULL; 11813 enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 11814 int i; 11815 11816 /* look at all crtc's that are going to be enabled in during modeset */ 11817 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 11818 intel_crtc = to_intel_crtc(crtc); 11819 11820 if (!crtc_state->active || !needs_modeset(crtc_state)) 11821 continue; 11822 11823 if (first_crtc_state) { 11824 other_crtc_state = to_intel_crtc_state(crtc_state); 11825 break; 11826 } else { 11827 first_crtc_state = to_intel_crtc_state(crtc_state); 11828 first_pipe = intel_crtc->pipe; 11829 } 11830 } 11831 11832 /* No workaround needed? */ 11833 if (!first_crtc_state) 11834 return 0; 11835 11836 /* w/a possibly needed, check how many crtc's are already enabled. */ 11837 for_each_intel_crtc(state->dev, intel_crtc) { 11838 struct intel_crtc_state *pipe_config; 11839 11840 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); 11841 if (IS_ERR(pipe_config)) 11842 return PTR_ERR(pipe_config); 11843 11844 pipe_config->hsw_workaround_pipe = INVALID_PIPE; 11845 11846 if (!pipe_config->base.active || 11847 needs_modeset(&pipe_config->base)) 11848 continue; 11849 11850 /* 2 or more enabled crtcs means no need for w/a */ 11851 if (enabled_pipe != INVALID_PIPE) 11852 return 0; 11853 11854 enabled_pipe = intel_crtc->pipe; 11855 } 11856 11857 if (enabled_pipe != INVALID_PIPE) 11858 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 11859 else if (other_crtc_state) 11860 other_crtc_state->hsw_workaround_pipe = first_pipe; 11861 11862 return 0; 11863 } 11864 11865 static int intel_lock_all_pipes(struct drm_atomic_state *state) 11866 { 11867 struct drm_crtc *crtc; 11868 11869 /* Add all pipes to the state */ 11870 for_each_crtc(state->dev, crtc) { 11871 struct drm_crtc_state *crtc_state; 11872 11873 crtc_state = drm_atomic_get_crtc_state(state, crtc); 11874 if (IS_ERR(crtc_state)) 11875 return PTR_ERR(crtc_state); 11876 } 11877 11878 return 0; 11879 } 11880 11881 static int intel_modeset_all_pipes(struct drm_atomic_state *state) 11882 { 11883 struct drm_crtc *crtc; 11884 11885 /* 11886 * Add all pipes to the state, and force 11887 * a modeset on all the active ones. 11888 */ 11889 for_each_crtc(state->dev, crtc) { 11890 struct drm_crtc_state *crtc_state; 11891 int ret; 11892 11893 crtc_state = drm_atomic_get_crtc_state(state, crtc); 11894 if (IS_ERR(crtc_state)) 11895 return PTR_ERR(crtc_state); 11896 11897 if (!crtc_state->active || needs_modeset(crtc_state)) 11898 continue; 11899 11900 crtc_state->mode_changed = true; 11901 11902 ret = drm_atomic_add_affected_connectors(state, crtc); 11903 if (ret) 11904 return ret; 11905 11906 ret = drm_atomic_add_affected_planes(state, crtc); 11907 if (ret) 11908 return ret; 11909 } 11910 11911 return 0; 11912 } 11913 11914 static int intel_modeset_checks(struct drm_atomic_state *state) 11915 { 11916 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 11917 struct drm_i915_private *dev_priv = to_i915(state->dev); 11918 struct drm_crtc *crtc; 11919 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 11920 int ret = 0, i; 11921 11922 if (!check_digital_port_conflicts(state)) { 11923 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 11924 return -EINVAL; 11925 } 11926 11927 intel_state->modeset = true; 11928 intel_state->active_crtcs = dev_priv->active_crtcs; 11929 intel_state->cdclk.logical = dev_priv->cdclk.logical; 11930 intel_state->cdclk.actual = dev_priv->cdclk.actual; 11931 11932 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 11933 if (new_crtc_state->active) 11934 intel_state->active_crtcs |= 1 << i; 11935 else 11936 intel_state->active_crtcs &= ~(1 << i); 11937 11938 if (old_crtc_state->active != new_crtc_state->active) 11939 intel_state->active_pipe_changes |= drm_crtc_mask(crtc); 11940 } 11941 11942 /* 11943 * See if the config requires any additional preparation, e.g. 11944 * to adjust global state with pipes off. We need to do this 11945 * here so we can get the modeset_pipe updated config for the new 11946 * mode set on this crtc. For other crtcs we need to use the 11947 * adjusted_mode bits in the crtc directly. 11948 */ 11949 if (dev_priv->display.modeset_calc_cdclk) { 11950 ret = dev_priv->display.modeset_calc_cdclk(state); 11951 if (ret < 0) 11952 return ret; 11953 11954 /* 11955 * Writes to dev_priv->cdclk.logical must protected by 11956 * holding all the crtc locks, even if we don't end up 11957 * touching the hardware 11958 */ 11959 if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical, 11960 &intel_state->cdclk.logical)) { 11961 ret = intel_lock_all_pipes(state); 11962 if (ret < 0) 11963 return ret; 11964 } 11965 11966 /* All pipes must be switched off while we change the cdclk. */ 11967 if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual, 11968 &intel_state->cdclk.actual)) { 11969 ret = intel_modeset_all_pipes(state); 11970 if (ret < 0) 11971 return ret; 11972 } 11973 11974 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", 11975 intel_state->cdclk.logical.cdclk, 11976 intel_state->cdclk.actual.cdclk); 11977 } else { 11978 to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical; 11979 } 11980 11981 intel_modeset_clear_plls(state); 11982 11983 if (IS_HASWELL(dev_priv)) 11984 return haswell_mode_set_planes_workaround(state); 11985 11986 return 0; 11987 } 11988 11989 /* 11990 * Handle calculation of various watermark data at the end of the atomic check 11991 * phase. The code here should be run after the per-crtc and per-plane 'check' 11992 * handlers to ensure that all derived state has been updated. 11993 */ 11994 static int calc_watermark_data(struct drm_atomic_state *state) 11995 { 11996 struct drm_device *dev = state->dev; 11997 struct drm_i915_private *dev_priv = to_i915(dev); 11998 11999 /* Is there platform-specific watermark information to calculate? */ 12000 if (dev_priv->display.compute_global_watermarks) 12001 return dev_priv->display.compute_global_watermarks(state); 12002 12003 return 0; 12004 } 12005 12006 /** 12007 * intel_atomic_check - validate state object 12008 * @dev: drm device 12009 * @state: state to validate 12010 */ 12011 static int intel_atomic_check(struct drm_device *dev, 12012 struct drm_atomic_state *state) 12013 { 12014 struct drm_i915_private *dev_priv = to_i915(dev); 12015 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12016 struct drm_crtc *crtc; 12017 struct drm_crtc_state *old_crtc_state, *crtc_state; 12018 int ret, i; 12019 bool any_ms = false; 12020 12021 ret = drm_atomic_helper_check_modeset(dev, state); 12022 if (ret) 12023 return ret; 12024 12025 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) { 12026 struct intel_crtc_state *pipe_config = 12027 to_intel_crtc_state(crtc_state); 12028 12029 /* Catch I915_MODE_FLAG_INHERITED */ 12030 if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags) 12031 crtc_state->mode_changed = true; 12032 12033 if (!needs_modeset(crtc_state)) 12034 continue; 12035 12036 if (!crtc_state->enable) { 12037 any_ms = true; 12038 continue; 12039 } 12040 12041 /* FIXME: For only active_changed we shouldn't need to do any 12042 * state recomputation at all. */ 12043 12044 ret = drm_atomic_add_affected_connectors(state, crtc); 12045 if (ret) 12046 return ret; 12047 12048 ret = intel_modeset_pipe_config(crtc, pipe_config); 12049 if (ret) { 12050 intel_dump_pipe_config(to_intel_crtc(crtc), 12051 pipe_config, "[failed]"); 12052 return ret; 12053 } 12054 12055 if (i915_modparams.fastboot && 12056 intel_pipe_config_compare(dev_priv, 12057 to_intel_crtc_state(old_crtc_state), 12058 pipe_config, true)) { 12059 crtc_state->mode_changed = false; 12060 pipe_config->update_pipe = true; 12061 } 12062 12063 if (needs_modeset(crtc_state)) 12064 any_ms = true; 12065 12066 ret = drm_atomic_add_affected_planes(state, crtc); 12067 if (ret) 12068 return ret; 12069 12070 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 12071 needs_modeset(crtc_state) ? 12072 "[modeset]" : "[fastset]"); 12073 } 12074 12075 if (any_ms) { 12076 ret = intel_modeset_checks(state); 12077 12078 if (ret) 12079 return ret; 12080 } else { 12081 intel_state->cdclk.logical = dev_priv->cdclk.logical; 12082 } 12083 12084 ret = drm_atomic_helper_check_planes(dev, state); 12085 if (ret) 12086 return ret; 12087 12088 intel_fbc_choose_crtc(dev_priv, state); 12089 return calc_watermark_data(state); 12090 } 12091 12092 static int intel_atomic_prepare_commit(struct drm_device *dev, 12093 struct drm_atomic_state *state) 12094 { 12095 return drm_atomic_helper_prepare_planes(dev, state); 12096 } 12097 12098 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 12099 { 12100 struct drm_device *dev = crtc->base.dev; 12101 12102 if (!dev->max_vblank_count) 12103 return drm_crtc_accurate_vblank_count(&crtc->base); 12104 12105 return dev->driver->get_vblank_counter(dev, crtc->pipe); 12106 } 12107 12108 static void intel_update_crtc(struct drm_crtc *crtc, 12109 struct drm_atomic_state *state, 12110 struct drm_crtc_state *old_crtc_state, 12111 struct drm_crtc_state *new_crtc_state) 12112 { 12113 struct drm_device *dev = crtc->dev; 12114 struct drm_i915_private *dev_priv = to_i915(dev); 12115 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12116 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); 12117 bool modeset = needs_modeset(new_crtc_state); 12118 12119 if (modeset) { 12120 update_scanline_offset(intel_crtc); 12121 dev_priv->display.crtc_enable(pipe_config, state); 12122 } else { 12123 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), 12124 pipe_config); 12125 } 12126 12127 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 12128 intel_fbc_enable( 12129 intel_crtc, pipe_config, 12130 to_intel_plane_state(crtc->primary->state)); 12131 } 12132 12133 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); 12134 } 12135 12136 static void intel_update_crtcs(struct drm_atomic_state *state) 12137 { 12138 struct drm_crtc *crtc; 12139 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12140 int i; 12141 12142 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12143 if (!new_crtc_state->active) 12144 continue; 12145 12146 intel_update_crtc(crtc, state, old_crtc_state, 12147 new_crtc_state); 12148 } 12149 } 12150 12151 static void skl_update_crtcs(struct drm_atomic_state *state) 12152 { 12153 struct drm_i915_private *dev_priv = to_i915(state->dev); 12154 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12155 struct drm_crtc *crtc; 12156 struct intel_crtc *intel_crtc; 12157 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12158 struct intel_crtc_state *cstate; 12159 unsigned int updated = 0; 12160 bool progress; 12161 enum i915_pipe pipe; 12162 int i; 12163 12164 const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; 12165 12166 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 12167 /* ignore allocations for crtc's that have been turned off. */ 12168 if (new_crtc_state->active) 12169 entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; 12170 12171 /* 12172 * Whenever the number of active pipes changes, we need to make sure we 12173 * update the pipes in the right order so that their ddb allocations 12174 * never overlap with eachother inbetween CRTC updates. Otherwise we'll 12175 * cause pipe underruns and other bad stuff. 12176 */ 12177 do { 12178 progress = false; 12179 12180 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12181 bool vbl_wait = false; 12182 unsigned int cmask = drm_crtc_mask(crtc); 12183 12184 intel_crtc = to_intel_crtc(crtc); 12185 cstate = to_intel_crtc_state(new_crtc_state); 12186 pipe = intel_crtc->pipe; 12187 12188 if (updated & cmask || !cstate->base.active) 12189 continue; 12190 12191 if (skl_ddb_allocation_overlaps(dev_priv, 12192 entries, 12193 &cstate->wm.skl.ddb, 12194 i)) 12195 continue; 12196 12197 updated |= cmask; 12198 entries[i] = &cstate->wm.skl.ddb; 12199 12200 /* 12201 * If this is an already active pipe, it's DDB changed, 12202 * and this isn't the last pipe that needs updating 12203 * then we need to wait for a vblank to pass for the 12204 * new ddb allocation to take effect. 12205 */ 12206 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, 12207 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && 12208 !new_crtc_state->active_changed && 12209 intel_state->wm_results.dirty_pipes != updated) 12210 vbl_wait = true; 12211 12212 intel_update_crtc(crtc, state, old_crtc_state, 12213 new_crtc_state); 12214 12215 if (vbl_wait) 12216 intel_wait_for_vblank(dev_priv, pipe); 12217 12218 progress = true; 12219 } 12220 } while (progress); 12221 } 12222 12223 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 12224 { 12225 struct intel_atomic_state *state, *next; 12226 struct llist_node *freed; 12227 12228 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 12229 llist_for_each_entry_safe(state, next, freed, freed) 12230 drm_atomic_state_put(&state->base); 12231 } 12232 12233 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 12234 { 12235 struct drm_i915_private *dev_priv = 12236 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 12237 12238 intel_atomic_helper_free_state(dev_priv); 12239 } 12240 12241 static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state) 12242 { 12243 struct wait_queue_entry wait_fence, wait_reset; 12244 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev); 12245 12246 init_wait_entry(&wait_fence, 0); 12247 init_wait_entry(&wait_reset, 0); 12248 for (;;) { 12249 prepare_to_wait(&intel_state->commit_ready.wait, 12250 &wait_fence, TASK_UNINTERRUPTIBLE); 12251 prepare_to_wait(&dev_priv->gpu_error.wait_queue, 12252 &wait_reset, TASK_UNINTERRUPTIBLE); 12253 12254 12255 if (i915_sw_fence_done(&intel_state->commit_ready) 12256 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags)) 12257 break; 12258 12259 schedule(); 12260 } 12261 finish_wait(&intel_state->commit_ready.wait, &wait_fence); 12262 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset); 12263 } 12264 12265 static void intel_atomic_commit_tail(struct drm_atomic_state *state) 12266 { 12267 struct drm_device *dev = state->dev; 12268 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12269 struct drm_i915_private *dev_priv = to_i915(dev); 12270 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12271 struct drm_crtc *crtc; 12272 struct intel_crtc_state *intel_cstate; 12273 u64 put_domains[I915_MAX_PIPES] = {}; 12274 int i; 12275 12276 intel_atomic_commit_fence_wait(intel_state); 12277 12278 drm_atomic_helper_wait_for_dependencies(state); 12279 12280 if (intel_state->modeset) 12281 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 12282 12283 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12285 12286 if (needs_modeset(new_crtc_state) || 12287 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12288 12289 put_domains[to_intel_crtc(crtc)->pipe] = 12290 modeset_get_crtc_power_domains(crtc, 12291 to_intel_crtc_state(new_crtc_state)); 12292 } 12293 12294 if (!needs_modeset(new_crtc_state)) 12295 continue; 12296 12297 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), 12298 to_intel_crtc_state(new_crtc_state)); 12299 12300 if (old_crtc_state->active) { 12301 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); 12302 dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); 12303 intel_crtc->active = false; 12304 intel_fbc_disable(intel_crtc); 12305 intel_disable_shared_dpll(intel_crtc); 12306 12307 /* 12308 * Underruns don't always raise 12309 * interrupts, so check manually. 12310 */ 12311 intel_check_cpu_fifo_underruns(dev_priv); 12312 intel_check_pch_fifo_underruns(dev_priv); 12313 12314 if (!new_crtc_state->active) { 12315 /* 12316 * Make sure we don't call initial_watermarks 12317 * for ILK-style watermark updates. 12318 * 12319 * No clue what this is supposed to achieve. 12320 */ 12321 if (INTEL_GEN(dev_priv) >= 9) 12322 dev_priv->display.initial_watermarks(intel_state, 12323 to_intel_crtc_state(new_crtc_state)); 12324 } 12325 } 12326 } 12327 12328 /* Only after disabling all output pipelines that will be changed can we 12329 * update the the output configuration. */ 12330 intel_modeset_update_crtc_state(state); 12331 12332 if (intel_state->modeset) { 12333 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 12334 12335 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual); 12336 12337 /* 12338 * SKL workaround: bspec recommends we disable the SAGV when we 12339 * have more then one pipe enabled 12340 */ 12341 if (!intel_can_enable_sagv(state)) 12342 intel_disable_sagv(dev_priv); 12343 12344 intel_modeset_verify_disabled(dev, state); 12345 } 12346 12347 /* Complete the events for pipes that have now been disabled */ 12348 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12349 bool modeset = needs_modeset(new_crtc_state); 12350 12351 /* Complete events for now disable pipes here. */ 12352 if (modeset && !new_crtc_state->active && new_crtc_state->event) { 12353 spin_lock_irq(&dev->event_lock); 12354 drm_crtc_send_vblank_event(crtc, new_crtc_state->event); 12355 spin_unlock_irq(&dev->event_lock); 12356 12357 new_crtc_state->event = NULL; 12358 } 12359 } 12360 12361 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 12362 dev_priv->display.update_crtcs(state); 12363 12364 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 12365 * already, but still need the state for the delayed optimization. To 12366 * fix this: 12367 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 12368 * - schedule that vblank worker _before_ calling hw_done 12369 * - at the start of commit_tail, cancel it _synchrously 12370 * - switch over to the vblank wait helper in the core after that since 12371 * we don't need out special handling any more. 12372 */ 12373 drm_atomic_helper_wait_for_flip_done(dev, state); 12374 12375 /* 12376 * Now that the vblank has passed, we can go ahead and program the 12377 * optimal watermarks on platforms that need two-step watermark 12378 * programming. 12379 * 12380 * TODO: Move this (and other cleanup) to an async worker eventually. 12381 */ 12382 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12383 intel_cstate = to_intel_crtc_state(new_crtc_state); 12384 12385 if (dev_priv->display.optimize_watermarks) 12386 dev_priv->display.optimize_watermarks(intel_state, 12387 intel_cstate); 12388 } 12389 12390 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12391 intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); 12392 12393 if (put_domains[i]) 12394 modeset_put_power_domains(dev_priv, put_domains[i]); 12395 12396 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 12397 } 12398 12399 if (intel_state->modeset && intel_can_enable_sagv(state)) 12400 intel_enable_sagv(dev_priv); 12401 12402 drm_atomic_helper_commit_hw_done(state); 12403 12404 if (intel_state->modeset) { 12405 /* As one of the primary mmio accessors, KMS has a high 12406 * likelihood of triggering bugs in unclaimed access. After we 12407 * finish modesetting, see if an error has been flagged, and if 12408 * so enable debugging for the next modeset - and hope we catch 12409 * the culprit. 12410 */ 12411 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 12412 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 12413 } 12414 12415 drm_atomic_helper_cleanup_planes(dev, state); 12416 12417 drm_atomic_helper_commit_cleanup_done(state); 12418 12419 drm_atomic_state_put(state); 12420 12421 intel_atomic_helper_free_state(dev_priv); 12422 } 12423 12424 static void intel_atomic_commit_work(struct work_struct *work) 12425 { 12426 struct drm_atomic_state *state = 12427 container_of(work, struct drm_atomic_state, commit_work); 12428 12429 intel_atomic_commit_tail(state); 12430 } 12431 12432 static int __i915_sw_fence_call 12433 intel_atomic_commit_ready(struct i915_sw_fence *fence, 12434 enum i915_sw_fence_notify notify) 12435 { 12436 struct intel_atomic_state *state = 12437 container_of(fence, struct intel_atomic_state, commit_ready); 12438 12439 switch (notify) { 12440 case FENCE_COMPLETE: 12441 /* we do blocking waits in the worker, nothing to do here */ 12442 break; 12443 case FENCE_FREE: 12444 { 12445 struct intel_atomic_helper *helper = 12446 &to_i915(state->base.dev)->atomic_helper; 12447 12448 if (llist_add(&state->freed, &helper->free_list)) 12449 schedule_work(&helper->free_work); 12450 break; 12451 } 12452 } 12453 12454 return NOTIFY_DONE; 12455 } 12456 12457 static void intel_atomic_track_fbs(struct drm_atomic_state *state) 12458 { 12459 struct drm_plane_state *old_plane_state, *new_plane_state; 12460 struct drm_plane *plane; 12461 int i; 12462 12463 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) 12464 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), 12465 intel_fb_obj(new_plane_state->fb), 12466 to_intel_plane(plane)->frontbuffer_bit); 12467 } 12468 12469 /** 12470 * intel_atomic_commit - commit validated state object 12471 * @dev: DRM device 12472 * @state: the top-level driver state object 12473 * @nonblock: nonblocking commit 12474 * 12475 * This function commits a top-level state object that has been validated 12476 * with drm_atomic_helper_check(). 12477 * 12478 * RETURNS 12479 * Zero for success or -errno. 12480 */ 12481 static int intel_atomic_commit(struct drm_device *dev, 12482 struct drm_atomic_state *state, 12483 bool nonblock) 12484 { 12485 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12486 struct drm_i915_private *dev_priv = to_i915(dev); 12487 int ret = 0; 12488 12489 drm_atomic_state_get(state); 12490 i915_sw_fence_init(&intel_state->commit_ready, 12491 intel_atomic_commit_ready); 12492 12493 /* 12494 * The intel_legacy_cursor_update() fast path takes care 12495 * of avoiding the vblank waits for simple cursor 12496 * movement and flips. For cursor on/off and size changes, 12497 * we want to perform the vblank waits so that watermark 12498 * updates happen during the correct frames. Gen9+ have 12499 * double buffered watermarks and so shouldn't need this. 12500 * 12501 * Unset state->legacy_cursor_update before the call to 12502 * drm_atomic_helper_setup_commit() because otherwise 12503 * drm_atomic_helper_wait_for_flip_done() is a noop and 12504 * we get FIFO underruns because we didn't wait 12505 * for vblank. 12506 * 12507 * FIXME doing watermarks and fb cleanup from a vblank worker 12508 * (assuming we had any) would solve these problems. 12509 */ 12510 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) { 12511 struct intel_crtc_state *new_crtc_state; 12512 struct intel_crtc *crtc; 12513 int i; 12514 12515 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i) 12516 if (new_crtc_state->wm.need_postvbl_update || 12517 new_crtc_state->update_wm_post) 12518 state->legacy_cursor_update = false; 12519 } 12520 12521 ret = intel_atomic_prepare_commit(dev, state); 12522 if (ret) { 12523 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 12524 i915_sw_fence_commit(&intel_state->commit_ready); 12525 return ret; 12526 } 12527 12528 ret = drm_atomic_helper_setup_commit(state, nonblock); 12529 if (!ret) 12530 ret = drm_atomic_helper_swap_state(state, true); 12531 12532 if (ret) { 12533 i915_sw_fence_commit(&intel_state->commit_ready); 12534 12535 drm_atomic_helper_cleanup_planes(dev, state); 12536 return ret; 12537 } 12538 dev_priv->wm.distrust_bios_wm = false; 12539 intel_shared_dpll_swap_state(state); 12540 intel_atomic_track_fbs(state); 12541 12542 if (intel_state->modeset) { 12543 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk, 12544 sizeof(intel_state->min_cdclk)); 12545 dev_priv->active_crtcs = intel_state->active_crtcs; 12546 dev_priv->cdclk.logical = intel_state->cdclk.logical; 12547 dev_priv->cdclk.actual = intel_state->cdclk.actual; 12548 } 12549 12550 drm_atomic_state_get(state); 12551 INIT_WORK(&state->commit_work, intel_atomic_commit_work); 12552 12553 i915_sw_fence_commit(&intel_state->commit_ready); 12554 if (nonblock && intel_state->modeset) { 12555 queue_work(dev_priv->modeset_wq, &state->commit_work); 12556 } else if (nonblock) { 12557 queue_work(system_unbound_wq, &state->commit_work); 12558 } else { 12559 if (intel_state->modeset) 12560 flush_workqueue(dev_priv->modeset_wq); 12561 intel_atomic_commit_tail(state); 12562 } 12563 12564 return 0; 12565 } 12566 12567 static const struct drm_crtc_funcs intel_crtc_funcs = { 12568 .gamma_set = drm_atomic_helper_legacy_gamma_set, 12569 .set_config = drm_atomic_helper_set_config, 12570 .destroy = intel_crtc_destroy, 12571 .page_flip = drm_atomic_helper_page_flip, 12572 .atomic_duplicate_state = intel_crtc_duplicate_state, 12573 .atomic_destroy_state = intel_crtc_destroy_state, 12574 .set_crc_source = intel_crtc_set_crc_source, 12575 }; 12576 12577 struct wait_rps_boost { 12578 struct wait_queue_entry wait; 12579 12580 struct drm_crtc *crtc; 12581 struct drm_i915_gem_request *request; 12582 }; 12583 12584 static int do_rps_boost(struct wait_queue_entry *_wait, 12585 unsigned mode, int sync, void *key) 12586 { 12587 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait); 12588 struct drm_i915_gem_request *rq = wait->request; 12589 12590 gen6_rps_boost(rq, NULL); 12591 i915_gem_request_put(rq); 12592 12593 drm_crtc_vblank_put(wait->crtc); 12594 12595 list_del(&wait->wait.entry); 12596 kfree(wait); 12597 return 1; 12598 } 12599 12600 static void add_rps_boost_after_vblank(struct drm_crtc *crtc, 12601 struct dma_fence *fence) 12602 { 12603 struct wait_rps_boost *wait; 12604 12605 if (!dma_fence_is_i915(fence)) 12606 return; 12607 12608 if (INTEL_GEN(to_i915(crtc->dev)) < 6) 12609 return; 12610 12611 if (drm_crtc_vblank_get(crtc)) 12612 return; 12613 12614 wait = kmalloc(sizeof(*wait), M_DRM, GFP_KERNEL); 12615 if (!wait) { 12616 drm_crtc_vblank_put(crtc); 12617 return; 12618 } 12619 12620 wait->request = to_request(dma_fence_get(fence)); 12621 wait->crtc = crtc; 12622 12623 wait->wait.func = do_rps_boost; 12624 wait->wait.flags = 0; 12625 12626 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); 12627 } 12628 12629 /** 12630 * intel_prepare_plane_fb - Prepare fb for usage on plane 12631 * @plane: drm plane to prepare for 12632 * @fb: framebuffer to prepare for presentation 12633 * 12634 * Prepares a framebuffer for usage on a display plane. Generally this 12635 * involves pinning the underlying object and updating the frontbuffer tracking 12636 * bits. Some older platforms need special physical address handling for 12637 * cursor planes. 12638 * 12639 * Must be called with struct_mutex held. 12640 * 12641 * Returns 0 on success, negative error code on failure. 12642 */ 12643 int 12644 intel_prepare_plane_fb(struct drm_plane *plane, 12645 struct drm_plane_state *new_state) 12646 { 12647 struct intel_atomic_state *intel_state = 12648 to_intel_atomic_state(new_state->state); 12649 struct drm_i915_private *dev_priv = to_i915(plane->dev); 12650 struct drm_framebuffer *fb = new_state->fb; 12651 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 12652 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 12653 int ret; 12654 12655 if (old_obj) { 12656 struct drm_crtc_state *crtc_state = 12657 drm_atomic_get_existing_crtc_state(new_state->state, 12658 plane->state->crtc); 12659 12660 /* Big Hammer, we also need to ensure that any pending 12661 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 12662 * current scanout is retired before unpinning the old 12663 * framebuffer. Note that we rely on userspace rendering 12664 * into the buffer attached to the pipe they are waiting 12665 * on. If not, userspace generates a GPU hang with IPEHR 12666 * point to the MI_WAIT_FOR_EVENT. 12667 * 12668 * This should only fail upon a hung GPU, in which case we 12669 * can safely continue. 12670 */ 12671 if (needs_modeset(crtc_state)) { 12672 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 12673 old_obj->resv, NULL, 12674 false, 0, 12675 GFP_KERNEL); 12676 if (ret < 0) 12677 return ret; 12678 } 12679 } 12680 12681 if (new_state->fence) { /* explicit fencing */ 12682 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 12683 new_state->fence, 12684 I915_FENCE_TIMEOUT, 12685 GFP_KERNEL); 12686 if (ret < 0) 12687 return ret; 12688 } 12689 12690 if (!obj) 12691 return 0; 12692 12693 ret = i915_gem_object_pin_pages(obj); 12694 if (ret) 12695 return ret; 12696 12697 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 12698 if (ret) { 12699 i915_gem_object_unpin_pages(obj); 12700 return ret; 12701 } 12702 12703 if (plane->type == DRM_PLANE_TYPE_CURSOR && 12704 INTEL_INFO(dev_priv)->cursor_needs_physical) { 12705 const int align = intel_cursor_alignment(dev_priv); 12706 12707 ret = i915_gem_object_attach_phys(obj, align); 12708 } else { 12709 struct i915_vma *vma; 12710 12711 vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); 12712 if (!IS_ERR(vma)) 12713 to_intel_plane_state(new_state)->vma = vma; 12714 else 12715 ret = PTR_ERR(vma); 12716 } 12717 12718 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); 12719 12720 mutex_unlock(&dev_priv->drm.struct_mutex); 12721 i915_gem_object_unpin_pages(obj); 12722 if (ret) 12723 return ret; 12724 12725 if (!new_state->fence) { /* implicit fencing */ 12726 struct dma_fence *fence; 12727 12728 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 12729 obj->resv, NULL, 12730 false, I915_FENCE_TIMEOUT, 12731 GFP_KERNEL); 12732 if (ret < 0) 12733 return ret; 12734 12735 fence = reservation_object_get_excl_rcu(obj->resv); 12736 if (fence) { 12737 add_rps_boost_after_vblank(new_state->crtc, fence); 12738 dma_fence_put(fence); 12739 } 12740 } else { 12741 add_rps_boost_after_vblank(new_state->crtc, new_state->fence); 12742 } 12743 12744 return 0; 12745 } 12746 12747 /** 12748 * intel_cleanup_plane_fb - Cleans up an fb after plane use 12749 * @plane: drm plane to clean up for 12750 * @fb: old framebuffer that was on plane 12751 * 12752 * Cleans up a framebuffer that has just been removed from a plane. 12753 * 12754 * Must be called with struct_mutex held. 12755 */ 12756 void 12757 intel_cleanup_plane_fb(struct drm_plane *plane, 12758 struct drm_plane_state *old_state) 12759 { 12760 struct i915_vma *vma; 12761 12762 /* Should only be called after a successful intel_prepare_plane_fb()! */ 12763 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); 12764 if (vma) { 12765 mutex_lock(&plane->dev->struct_mutex); 12766 intel_unpin_fb_vma(vma); 12767 mutex_unlock(&plane->dev->struct_mutex); 12768 } 12769 } 12770 12771 int 12772 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 12773 { 12774 struct drm_i915_private *dev_priv; 12775 int max_scale; 12776 int crtc_clock, max_dotclk; 12777 12778 if (!intel_crtc || !crtc_state->base.enable) 12779 return DRM_PLANE_HELPER_NO_SCALING; 12780 12781 dev_priv = to_i915(intel_crtc->base.dev); 12782 12783 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 12784 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; 12785 12786 if (IS_GEMINILAKE(dev_priv)) 12787 max_dotclk *= 2; 12788 12789 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) 12790 return DRM_PLANE_HELPER_NO_SCALING; 12791 12792 /* 12793 * skl max scale is lower of: 12794 * close to 3 but not 3, -1 is for that purpose 12795 * or 12796 * cdclk/crtc_clock 12797 */ 12798 max_scale = min((1 << 16) * 3 - 1, 12799 (1 << 8) * ((max_dotclk << 8) / crtc_clock)); 12800 12801 return max_scale; 12802 } 12803 12804 static int 12805 intel_check_primary_plane(struct intel_plane *plane, 12806 struct intel_crtc_state *crtc_state, 12807 struct intel_plane_state *state) 12808 { 12809 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 12810 struct drm_crtc *crtc = state->base.crtc; 12811 int min_scale = DRM_PLANE_HELPER_NO_SCALING; 12812 int max_scale = DRM_PLANE_HELPER_NO_SCALING; 12813 bool can_position = false; 12814 int ret; 12815 12816 if (INTEL_GEN(dev_priv) >= 9) { 12817 /* use scaler when colorkey is not required */ 12818 if (state->ckey.flags == I915_SET_COLORKEY_NONE) { 12819 min_scale = 1; 12820 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); 12821 } 12822 can_position = true; 12823 } 12824 12825 ret = drm_plane_helper_check_state(&state->base, 12826 &state->clip, 12827 min_scale, max_scale, 12828 can_position, true); 12829 if (ret) 12830 return ret; 12831 12832 if (!state->base.fb) 12833 return 0; 12834 12835 if (INTEL_GEN(dev_priv) >= 9) { 12836 ret = skl_check_plane_surface(state); 12837 if (ret) 12838 return ret; 12839 12840 state->ctl = skl_plane_ctl(crtc_state, state); 12841 } else { 12842 ret = i9xx_check_plane_surface(state); 12843 if (ret) 12844 return ret; 12845 12846 state->ctl = i9xx_plane_ctl(crtc_state, state); 12847 } 12848 12849 return 0; 12850 } 12851 12852 static void intel_begin_crtc_commit(struct drm_crtc *crtc, 12853 struct drm_crtc_state *old_crtc_state) 12854 { 12855 struct drm_device *dev = crtc->dev; 12856 struct drm_i915_private *dev_priv = to_i915(dev); 12857 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12858 struct intel_crtc_state *old_intel_cstate = 12859 to_intel_crtc_state(old_crtc_state); 12860 struct intel_atomic_state *old_intel_state = 12861 to_intel_atomic_state(old_crtc_state->state); 12862 struct intel_crtc_state *intel_cstate = 12863 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc); 12864 bool modeset = needs_modeset(&intel_cstate->base); 12865 12866 if (!modeset && 12867 (intel_cstate->base.color_mgmt_changed || 12868 intel_cstate->update_pipe)) { 12869 intel_color_set_csc(&intel_cstate->base); 12870 intel_color_load_luts(&intel_cstate->base); 12871 } 12872 12873 /* Perform vblank evasion around commit operation */ 12874 intel_pipe_update_start(intel_cstate); 12875 12876 if (modeset) 12877 goto out; 12878 12879 if (intel_cstate->update_pipe) 12880 intel_update_pipe_config(old_intel_cstate, intel_cstate); 12881 else if (INTEL_GEN(dev_priv) >= 9) 12882 skl_detach_scalers(intel_crtc); 12883 12884 out: 12885 if (dev_priv->display.atomic_update_watermarks) 12886 dev_priv->display.atomic_update_watermarks(old_intel_state, 12887 intel_cstate); 12888 } 12889 12890 static void intel_finish_crtc_commit(struct drm_crtc *crtc, 12891 struct drm_crtc_state *old_crtc_state) 12892 { 12893 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12894 struct intel_atomic_state *old_intel_state = 12895 to_intel_atomic_state(old_crtc_state->state); 12896 struct intel_crtc_state *new_crtc_state = 12897 intel_atomic_get_new_crtc_state(old_intel_state, intel_crtc); 12898 12899 intel_pipe_update_end(new_crtc_state); 12900 } 12901 12902 /** 12903 * intel_plane_destroy - destroy a plane 12904 * @plane: plane to destroy 12905 * 12906 * Common destruction function for all types of planes (primary, cursor, 12907 * sprite). 12908 */ 12909 void intel_plane_destroy(struct drm_plane *plane) 12910 { 12911 drm_plane_cleanup(plane); 12912 kfree(to_intel_plane(plane)); 12913 } 12914 12915 static bool i8xx_mod_supported(uint32_t format, uint64_t modifier) 12916 { 12917 switch (format) { 12918 case DRM_FORMAT_C8: 12919 case DRM_FORMAT_RGB565: 12920 case DRM_FORMAT_XRGB1555: 12921 case DRM_FORMAT_XRGB8888: 12922 return modifier == DRM_FORMAT_MOD_LINEAR || 12923 modifier == I915_FORMAT_MOD_X_TILED; 12924 default: 12925 return false; 12926 } 12927 } 12928 12929 static bool i965_mod_supported(uint32_t format, uint64_t modifier) 12930 { 12931 switch (format) { 12932 case DRM_FORMAT_C8: 12933 case DRM_FORMAT_RGB565: 12934 case DRM_FORMAT_XRGB8888: 12935 case DRM_FORMAT_XBGR8888: 12936 case DRM_FORMAT_XRGB2101010: 12937 case DRM_FORMAT_XBGR2101010: 12938 return modifier == DRM_FORMAT_MOD_LINEAR || 12939 modifier == I915_FORMAT_MOD_X_TILED; 12940 default: 12941 return false; 12942 } 12943 } 12944 12945 static bool skl_mod_supported(uint32_t format, uint64_t modifier) 12946 { 12947 switch (format) { 12948 case DRM_FORMAT_XRGB8888: 12949 case DRM_FORMAT_XBGR8888: 12950 case DRM_FORMAT_ARGB8888: 12951 case DRM_FORMAT_ABGR8888: 12952 if (modifier == I915_FORMAT_MOD_Yf_TILED_CCS || 12953 modifier == I915_FORMAT_MOD_Y_TILED_CCS) 12954 return true; 12955 /* fall through */ 12956 case DRM_FORMAT_RGB565: 12957 case DRM_FORMAT_XRGB2101010: 12958 case DRM_FORMAT_XBGR2101010: 12959 case DRM_FORMAT_YUYV: 12960 case DRM_FORMAT_YVYU: 12961 case DRM_FORMAT_UYVY: 12962 case DRM_FORMAT_VYUY: 12963 if (modifier == I915_FORMAT_MOD_Yf_TILED) 12964 return true; 12965 /* fall through */ 12966 case DRM_FORMAT_C8: 12967 if (modifier == DRM_FORMAT_MOD_LINEAR || 12968 modifier == I915_FORMAT_MOD_X_TILED || 12969 modifier == I915_FORMAT_MOD_Y_TILED) 12970 return true; 12971 /* fall through */ 12972 default: 12973 return false; 12974 } 12975 } 12976 12977 static bool intel_primary_plane_format_mod_supported(struct drm_plane *plane, 12978 uint32_t format, 12979 uint64_t modifier) 12980 { 12981 struct drm_i915_private *dev_priv = to_i915(plane->dev); 12982 12983 if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) 12984 return false; 12985 12986 if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_INTEL && 12987 modifier != DRM_FORMAT_MOD_LINEAR) 12988 return false; 12989 12990 if (INTEL_GEN(dev_priv) >= 9) 12991 return skl_mod_supported(format, modifier); 12992 else if (INTEL_GEN(dev_priv) >= 4) 12993 return i965_mod_supported(format, modifier); 12994 else 12995 return i8xx_mod_supported(format, modifier); 12996 12997 unreachable(); 12998 } 12999 13000 static bool intel_cursor_plane_format_mod_supported(struct drm_plane *plane, 13001 uint32_t format, 13002 uint64_t modifier) 13003 { 13004 if (WARN_ON(modifier == DRM_FORMAT_MOD_INVALID)) 13005 return false; 13006 13007 return modifier == DRM_FORMAT_MOD_LINEAR && format == DRM_FORMAT_ARGB8888; 13008 } 13009 13010 static struct drm_plane_funcs intel_plane_funcs = { 13011 .update_plane = drm_atomic_helper_update_plane, 13012 .disable_plane = drm_atomic_helper_disable_plane, 13013 .destroy = intel_plane_destroy, 13014 .atomic_get_property = intel_plane_atomic_get_property, 13015 .atomic_set_property = intel_plane_atomic_set_property, 13016 .atomic_duplicate_state = intel_plane_duplicate_state, 13017 .atomic_destroy_state = intel_plane_destroy_state, 13018 .format_mod_supported = intel_primary_plane_format_mod_supported, 13019 }; 13020 13021 static int 13022 intel_legacy_cursor_update(struct drm_plane *plane, 13023 struct drm_crtc *crtc, 13024 struct drm_framebuffer *fb, 13025 int crtc_x, int crtc_y, 13026 unsigned int crtc_w, unsigned int crtc_h, 13027 uint32_t src_x, uint32_t src_y, 13028 uint32_t src_w, uint32_t src_h, 13029 struct drm_modeset_acquire_ctx *ctx) 13030 { 13031 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 13032 int ret; 13033 struct drm_plane_state *old_plane_state, *new_plane_state; 13034 struct intel_plane *intel_plane = to_intel_plane(plane); 13035 struct drm_framebuffer *old_fb; 13036 struct drm_crtc_state *crtc_state = crtc->state; 13037 struct i915_vma *old_vma, *vma; 13038 13039 /* 13040 * When crtc is inactive or there is a modeset pending, 13041 * wait for it to complete in the slowpath 13042 */ 13043 if (!crtc_state->active || needs_modeset(crtc_state) || 13044 to_intel_crtc_state(crtc_state)->update_pipe) 13045 goto slow; 13046 13047 old_plane_state = plane->state; 13048 /* 13049 * Don't do an async update if there is an outstanding commit modifying 13050 * the plane. This prevents our async update's changes from getting 13051 * overridden by a previous synchronous update's state. 13052 */ 13053 if (old_plane_state->commit && 13054 !try_wait_for_completion(&old_plane_state->commit->hw_done)) 13055 goto slow; 13056 13057 /* 13058 * If any parameters change that may affect watermarks, 13059 * take the slowpath. Only changing fb or position should be 13060 * in the fastpath. 13061 */ 13062 if (old_plane_state->crtc != crtc || 13063 old_plane_state->src_w != src_w || 13064 old_plane_state->src_h != src_h || 13065 old_plane_state->crtc_w != crtc_w || 13066 old_plane_state->crtc_h != crtc_h || 13067 !old_plane_state->fb != !fb) 13068 goto slow; 13069 13070 new_plane_state = intel_plane_duplicate_state(plane); 13071 if (!new_plane_state) 13072 return -ENOMEM; 13073 13074 drm_atomic_set_fb_for_plane(new_plane_state, fb); 13075 13076 new_plane_state->src_x = src_x; 13077 new_plane_state->src_y = src_y; 13078 new_plane_state->src_w = src_w; 13079 new_plane_state->src_h = src_h; 13080 new_plane_state->crtc_x = crtc_x; 13081 new_plane_state->crtc_y = crtc_y; 13082 new_plane_state->crtc_w = crtc_w; 13083 new_plane_state->crtc_h = crtc_h; 13084 13085 ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), 13086 to_intel_crtc_state(crtc->state), /* FIXME need a new crtc state? */ 13087 to_intel_plane_state(plane->state), 13088 to_intel_plane_state(new_plane_state)); 13089 if (ret) 13090 goto out_free; 13091 13092 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 13093 if (ret) 13094 goto out_free; 13095 13096 if (INTEL_INFO(dev_priv)->cursor_needs_physical) { 13097 int align = intel_cursor_alignment(dev_priv); 13098 13099 ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); 13100 if (ret) { 13101 DRM_DEBUG_KMS("failed to attach phys object\n"); 13102 goto out_unlock; 13103 } 13104 } else { 13105 vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13106 if (IS_ERR(vma)) { 13107 DRM_DEBUG_KMS("failed to pin object\n"); 13108 13109 ret = PTR_ERR(vma); 13110 goto out_unlock; 13111 } 13112 13113 to_intel_plane_state(new_plane_state)->vma = vma; 13114 } 13115 13116 old_fb = old_plane_state->fb; 13117 13118 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), 13119 intel_plane->frontbuffer_bit); 13120 13121 /* Swap plane state */ 13122 plane->state = new_plane_state; 13123 13124 if (plane->state->visible) { 13125 trace_intel_update_plane(plane, to_intel_crtc(crtc)); 13126 intel_plane->update_plane(intel_plane, 13127 to_intel_crtc_state(crtc->state), 13128 to_intel_plane_state(plane->state)); 13129 } else { 13130 trace_intel_disable_plane(plane, to_intel_crtc(crtc)); 13131 intel_plane->disable_plane(intel_plane, to_intel_crtc(crtc)); 13132 } 13133 13134 old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma); 13135 if (old_vma) 13136 intel_unpin_fb_vma(old_vma); 13137 13138 out_unlock: 13139 mutex_unlock(&dev_priv->drm.struct_mutex); 13140 out_free: 13141 if (ret) 13142 intel_plane_destroy_state(plane, new_plane_state); 13143 else 13144 intel_plane_destroy_state(plane, old_plane_state); 13145 return ret; 13146 13147 slow: 13148 return drm_atomic_helper_update_plane(plane, crtc, fb, 13149 crtc_x, crtc_y, crtc_w, crtc_h, 13150 src_x, src_y, src_w, src_h, ctx); 13151 } 13152 13153 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 13154 .update_plane = intel_legacy_cursor_update, 13155 .disable_plane = drm_atomic_helper_disable_plane, 13156 .destroy = intel_plane_destroy, 13157 .atomic_get_property = intel_plane_atomic_get_property, 13158 .atomic_set_property = intel_plane_atomic_set_property, 13159 .atomic_duplicate_state = intel_plane_duplicate_state, 13160 .atomic_destroy_state = intel_plane_destroy_state, 13161 .format_mod_supported = intel_cursor_plane_format_mod_supported, 13162 }; 13163 13164 static struct intel_plane * 13165 intel_primary_plane_create(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 13166 { 13167 struct intel_plane *primary = NULL; 13168 struct intel_plane_state *state = NULL; 13169 const uint32_t *intel_primary_formats; 13170 unsigned int supported_rotations; 13171 unsigned int num_formats; 13172 const uint64_t *modifiers; 13173 int ret; 13174 13175 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13176 if (!primary) { 13177 ret = -ENOMEM; 13178 goto fail; 13179 } 13180 13181 state = intel_create_plane_state(&primary->base); 13182 if (!state) { 13183 ret = -ENOMEM; 13184 goto fail; 13185 } 13186 13187 primary->base.state = &state->base; 13188 13189 primary->can_scale = false; 13190 primary->max_downscale = 1; 13191 if (INTEL_GEN(dev_priv) >= 9) { 13192 primary->can_scale = true; 13193 state->scaler_id = -1; 13194 } 13195 primary->pipe = pipe; 13196 /* 13197 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13198 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 13199 */ 13200 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 13201 primary->plane = (enum plane) !pipe; 13202 else 13203 primary->plane = (enum plane) pipe; 13204 primary->id = PLANE_PRIMARY; 13205 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13206 primary->check_plane = intel_check_primary_plane; 13207 13208 if (INTEL_GEN(dev_priv) >= 10) { 13209 intel_primary_formats = skl_primary_formats; 13210 num_formats = ARRAY_SIZE(skl_primary_formats); 13211 modifiers = skl_format_modifiers_ccs; 13212 13213 primary->update_plane = skl_update_plane; 13214 primary->disable_plane = skl_disable_plane; 13215 primary->get_hw_state = skl_plane_get_hw_state; 13216 } else if (INTEL_GEN(dev_priv) >= 9) { 13217 intel_primary_formats = skl_primary_formats; 13218 num_formats = ARRAY_SIZE(skl_primary_formats); 13219 if (pipe < PIPE_C) 13220 modifiers = skl_format_modifiers_ccs; 13221 else 13222 modifiers = skl_format_modifiers_noccs; 13223 13224 primary->update_plane = skl_update_plane; 13225 primary->disable_plane = skl_disable_plane; 13226 primary->get_hw_state = skl_plane_get_hw_state; 13227 } else if (INTEL_GEN(dev_priv) >= 4) { 13228 intel_primary_formats = i965_primary_formats; 13229 num_formats = ARRAY_SIZE(i965_primary_formats); 13230 modifiers = i9xx_format_modifiers; 13231 13232 primary->update_plane = i9xx_update_primary_plane; 13233 primary->disable_plane = i9xx_disable_primary_plane; 13234 primary->get_hw_state = i9xx_plane_get_hw_state; 13235 } else { 13236 intel_primary_formats = i8xx_primary_formats; 13237 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13238 modifiers = i9xx_format_modifiers; 13239 13240 primary->update_plane = i9xx_update_primary_plane; 13241 primary->disable_plane = i9xx_disable_primary_plane; 13242 primary->get_hw_state = i9xx_plane_get_hw_state; 13243 } 13244 13245 if (INTEL_GEN(dev_priv) >= 9) 13246 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13247 0, &intel_plane_funcs, 13248 intel_primary_formats, num_formats, 13249 modifiers, 13250 DRM_PLANE_TYPE_PRIMARY, 13251 "plane 1%c", pipe_name(pipe)); 13252 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 13253 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13254 0, &intel_plane_funcs, 13255 intel_primary_formats, num_formats, 13256 modifiers, 13257 DRM_PLANE_TYPE_PRIMARY, 13258 "primary %c", pipe_name(pipe)); 13259 else 13260 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13261 0, &intel_plane_funcs, 13262 intel_primary_formats, num_formats, 13263 modifiers, 13264 DRM_PLANE_TYPE_PRIMARY, 13265 "plane %c", plane_name(primary->plane)); 13266 if (ret) 13267 goto fail; 13268 13269 if (INTEL_GEN(dev_priv) >= 9) { 13270 supported_rotations = 13271 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | 13272 DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; 13273 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 13274 supported_rotations = 13275 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | 13276 DRM_MODE_REFLECT_X; 13277 } else if (INTEL_GEN(dev_priv) >= 4) { 13278 supported_rotations = 13279 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; 13280 } else { 13281 supported_rotations = DRM_MODE_ROTATE_0; 13282 } 13283 13284 if (INTEL_GEN(dev_priv) >= 4) 13285 drm_plane_create_rotation_property(&primary->base, 13286 DRM_MODE_ROTATE_0, 13287 supported_rotations); 13288 13289 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13290 13291 return primary; 13292 13293 fail: 13294 kfree(state); 13295 kfree(primary); 13296 13297 return ERR_PTR(ret); 13298 } 13299 13300 static struct intel_plane * 13301 intel_cursor_plane_create(struct drm_i915_private *dev_priv, 13302 enum i915_pipe pipe) 13303 { 13304 struct intel_plane *cursor = NULL; 13305 struct intel_plane_state *state = NULL; 13306 int ret; 13307 13308 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13309 if (!cursor) { 13310 ret = -ENOMEM; 13311 goto fail; 13312 } 13313 13314 state = intel_create_plane_state(&cursor->base); 13315 if (!state) { 13316 ret = -ENOMEM; 13317 goto fail; 13318 } 13319 13320 cursor->base.state = &state->base; 13321 13322 cursor->can_scale = false; 13323 cursor->max_downscale = 1; 13324 cursor->pipe = pipe; 13325 cursor->plane = pipe; 13326 cursor->id = PLANE_CURSOR; 13327 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 13328 13329 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 13330 cursor->update_plane = i845_update_cursor; 13331 cursor->disable_plane = i845_disable_cursor; 13332 cursor->get_hw_state = i845_cursor_get_hw_state; 13333 cursor->check_plane = i845_check_cursor; 13334 } else { 13335 cursor->update_plane = i9xx_update_cursor; 13336 cursor->disable_plane = i9xx_disable_cursor; 13337 cursor->get_hw_state = i9xx_cursor_get_hw_state; 13338 cursor->check_plane = i9xx_check_cursor; 13339 } 13340 13341 cursor->cursor.base = ~0; 13342 cursor->cursor.cntl = ~0; 13343 13344 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) 13345 cursor->cursor.size = ~0; 13346 13347 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 13348 0, &intel_cursor_plane_funcs, 13349 intel_cursor_formats, 13350 ARRAY_SIZE(intel_cursor_formats), 13351 cursor_format_modifiers, 13352 DRM_PLANE_TYPE_CURSOR, 13353 "cursor %c", pipe_name(pipe)); 13354 if (ret) 13355 goto fail; 13356 13357 if (INTEL_GEN(dev_priv) >= 4) 13358 drm_plane_create_rotation_property(&cursor->base, 13359 DRM_MODE_ROTATE_0, 13360 DRM_MODE_ROTATE_0 | 13361 DRM_MODE_ROTATE_180); 13362 13363 if (INTEL_GEN(dev_priv) >= 9) 13364 state->scaler_id = -1; 13365 13366 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13367 13368 return cursor; 13369 13370 fail: 13371 kfree(state); 13372 kfree(cursor); 13373 13374 return ERR_PTR(ret); 13375 } 13376 13377 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 13378 struct intel_crtc_state *crtc_state) 13379 { 13380 struct intel_crtc_scaler_state *scaler_state = 13381 &crtc_state->scaler_state; 13382 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13383 int i; 13384 13385 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; 13386 if (!crtc->num_scalers) 13387 return; 13388 13389 for (i = 0; i < crtc->num_scalers; i++) { 13390 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13391 13392 scaler->in_use = 0; 13393 scaler->mode = PS_SCALER_MODE_DYN; 13394 } 13395 13396 scaler_state->scaler_id = -1; 13397 } 13398 13399 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 13400 { 13401 struct intel_crtc *intel_crtc; 13402 struct intel_crtc_state *crtc_state = NULL; 13403 struct intel_plane *primary = NULL; 13404 struct intel_plane *cursor = NULL; 13405 int sprite, ret; 13406 13407 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13408 if (!intel_crtc) 13409 return -ENOMEM; 13410 13411 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13412 if (!crtc_state) { 13413 ret = -ENOMEM; 13414 goto fail; 13415 } 13416 intel_crtc->config = crtc_state; 13417 intel_crtc->base.state = &crtc_state->base; 13418 crtc_state->base.crtc = &intel_crtc->base; 13419 13420 primary = intel_primary_plane_create(dev_priv, pipe); 13421 if (IS_ERR(primary)) { 13422 ret = PTR_ERR(primary); 13423 goto fail; 13424 } 13425 intel_crtc->plane_ids_mask |= BIT(primary->id); 13426 13427 for_each_sprite(dev_priv, pipe, sprite) { 13428 struct intel_plane *plane; 13429 13430 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 13431 if (IS_ERR(plane)) { 13432 ret = PTR_ERR(plane); 13433 goto fail; 13434 } 13435 intel_crtc->plane_ids_mask |= BIT(plane->id); 13436 } 13437 13438 cursor = intel_cursor_plane_create(dev_priv, pipe); 13439 if (IS_ERR(cursor)) { 13440 ret = PTR_ERR(cursor); 13441 goto fail; 13442 } 13443 intel_crtc->plane_ids_mask |= BIT(cursor->id); 13444 13445 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 13446 &primary->base, &cursor->base, 13447 &intel_crtc_funcs, 13448 "pipe %c", pipe_name(pipe)); 13449 if (ret) 13450 goto fail; 13451 13452 intel_crtc->pipe = pipe; 13453 intel_crtc->plane = primary->plane; 13454 13455 /* initialize shared scalers */ 13456 intel_crtc_init_scalers(intel_crtc, crtc_state); 13457 13458 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 13459 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 13460 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; 13461 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; 13462 13463 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 13464 13465 intel_color_init(&intel_crtc->base); 13466 13467 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 13468 13469 return 0; 13470 13471 fail: 13472 /* 13473 * drm_mode_config_cleanup() will free up any 13474 * crtcs/planes already initialized. 13475 */ 13476 kfree(crtc_state); 13477 kfree(intel_crtc); 13478 13479 return ret; 13480 } 13481 13482 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 13483 { 13484 struct drm_device *dev = connector->base.dev; 13485 13486 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 13487 13488 if (!connector->base.state->crtc) 13489 return INVALID_PIPE; 13490 13491 return to_intel_crtc(connector->base.state->crtc)->pipe; 13492 } 13493 13494 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13495 struct drm_file *file) 13496 { 13497 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13498 struct drm_crtc *drmmode_crtc; 13499 struct intel_crtc *crtc; 13500 13501 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id); 13502 if (!drmmode_crtc) 13503 return -ENOENT; 13504 13505 crtc = to_intel_crtc(drmmode_crtc); 13506 pipe_from_crtc_id->pipe = crtc->pipe; 13507 13508 return 0; 13509 } 13510 13511 static int intel_encoder_clones(struct intel_encoder *encoder) 13512 { 13513 struct drm_device *dev = encoder->base.dev; 13514 struct intel_encoder *source_encoder; 13515 int index_mask = 0; 13516 int entry = 0; 13517 13518 for_each_intel_encoder(dev, source_encoder) { 13519 if (encoders_cloneable(encoder, source_encoder)) 13520 index_mask |= (1 << entry); 13521 13522 entry++; 13523 } 13524 13525 return index_mask; 13526 } 13527 13528 static bool has_edp_a(struct drm_i915_private *dev_priv) 13529 { 13530 if (!IS_MOBILE(dev_priv)) 13531 return false; 13532 13533 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 13534 return false; 13535 13536 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 13537 return false; 13538 13539 return true; 13540 } 13541 13542 static bool intel_crt_present(struct drm_i915_private *dev_priv) 13543 { 13544 if (INTEL_GEN(dev_priv) >= 9) 13545 return false; 13546 13547 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 13548 return false; 13549 13550 if (IS_CHERRYVIEW(dev_priv)) 13551 return false; 13552 13553 if (HAS_PCH_LPT_H(dev_priv) && 13554 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 13555 return false; 13556 13557 /* DDI E can't be used if DDI A requires 4 lanes */ 13558 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 13559 return false; 13560 13561 if (!dev_priv->vbt.int_crt_support) 13562 return false; 13563 13564 return true; 13565 } 13566 13567 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 13568 { 13569 int pps_num; 13570 int pps_idx; 13571 13572 if (HAS_DDI(dev_priv)) 13573 return; 13574 /* 13575 * This w/a is needed at least on CPT/PPT, but to be sure apply it 13576 * everywhere where registers can be write protected. 13577 */ 13578 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13579 pps_num = 2; 13580 else 13581 pps_num = 1; 13582 13583 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 13584 u32 val = I915_READ(PP_CONTROL(pps_idx)); 13585 13586 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 13587 I915_WRITE(PP_CONTROL(pps_idx), val); 13588 } 13589 } 13590 13591 static void intel_pps_init(struct drm_i915_private *dev_priv) 13592 { 13593 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 13594 dev_priv->pps_mmio_base = PCH_PPS_BASE; 13595 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 13596 dev_priv->pps_mmio_base = VLV_PPS_BASE; 13597 else 13598 dev_priv->pps_mmio_base = PPS_BASE; 13599 13600 intel_pps_unlock_regs_wa(dev_priv); 13601 } 13602 13603 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 13604 { 13605 struct intel_encoder *encoder; 13606 bool dpd_is_edp = false; 13607 13608 intel_pps_init(dev_priv); 13609 13610 /* 13611 * intel_edp_init_connector() depends on this completing first, to 13612 * prevent the registeration of both eDP and LVDS and the incorrect 13613 * sharing of the PPS. 13614 */ 13615 intel_lvds_init(dev_priv); 13616 13617 if (intel_crt_present(dev_priv)) 13618 intel_crt_init(dev_priv); 13619 13620 if (IS_GEN9_LP(dev_priv)) { 13621 /* 13622 * FIXME: Broxton doesn't support port detection via the 13623 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 13624 * detect the ports. 13625 */ 13626 intel_ddi_init(dev_priv, PORT_A); 13627 intel_ddi_init(dev_priv, PORT_B); 13628 intel_ddi_init(dev_priv, PORT_C); 13629 13630 intel_dsi_init(dev_priv); 13631 } else if (HAS_DDI(dev_priv)) { 13632 int found; 13633 13634 /* 13635 * Haswell uses DDI functions to detect digital outputs. 13636 * On SKL pre-D0 the strap isn't connected, so we assume 13637 * it's there. 13638 */ 13639 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 13640 /* WaIgnoreDDIAStrap: skl */ 13641 if (found || IS_GEN9_BC(dev_priv)) 13642 intel_ddi_init(dev_priv, PORT_A); 13643 13644 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 13645 * register */ 13646 found = I915_READ(SFUSE_STRAP); 13647 13648 if (found & SFUSE_STRAP_DDIB_DETECTED) 13649 intel_ddi_init(dev_priv, PORT_B); 13650 if (found & SFUSE_STRAP_DDIC_DETECTED) 13651 intel_ddi_init(dev_priv, PORT_C); 13652 if (found & SFUSE_STRAP_DDID_DETECTED) 13653 intel_ddi_init(dev_priv, PORT_D); 13654 /* 13655 * On SKL we don't have a way to detect DDI-E so we rely on VBT. 13656 */ 13657 if (IS_GEN9_BC(dev_priv) && 13658 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 13659 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 13660 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 13661 intel_ddi_init(dev_priv, PORT_E); 13662 13663 } else if (HAS_PCH_SPLIT(dev_priv)) { 13664 int found; 13665 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D); 13666 13667 if (has_edp_a(dev_priv)) 13668 intel_dp_init(dev_priv, DP_A, PORT_A); 13669 13670 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 13671 /* PCH SDVOB multiplex with HDMIB */ 13672 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 13673 if (!found) 13674 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 13675 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 13676 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 13677 } 13678 13679 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 13680 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 13681 13682 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 13683 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 13684 13685 if (I915_READ(PCH_DP_C) & DP_DETECTED) 13686 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 13687 13688 if (I915_READ(PCH_DP_D) & DP_DETECTED) 13689 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 13690 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 13691 bool has_edp, has_port; 13692 13693 /* 13694 * The DP_DETECTED bit is the latched state of the DDC 13695 * SDA pin at boot. However since eDP doesn't require DDC 13696 * (no way to plug in a DP->HDMI dongle) the DDC pins for 13697 * eDP ports may have been muxed to an alternate function. 13698 * Thus we can't rely on the DP_DETECTED bit alone to detect 13699 * eDP ports. Consult the VBT as well as DP_DETECTED to 13700 * detect eDP ports. 13701 * 13702 * Sadly the straps seem to be missing sometimes even for HDMI 13703 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 13704 * and VBT for the presence of the port. Additionally we can't 13705 * trust the port type the VBT declares as we've seen at least 13706 * HDMI ports that the VBT claim are DP or eDP. 13707 */ 13708 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B); 13709 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 13710 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 13711 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 13712 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 13713 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 13714 13715 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C); 13716 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 13717 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 13718 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 13719 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 13720 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 13721 13722 if (IS_CHERRYVIEW(dev_priv)) { 13723 /* 13724 * eDP not supported on port D, 13725 * so no need to worry about it 13726 */ 13727 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 13728 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 13729 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 13730 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 13731 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 13732 } 13733 13734 intel_dsi_init(dev_priv); 13735 } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { 13736 bool found = false; 13737 13738 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 13739 DRM_DEBUG_KMS("probing SDVOB\n"); 13740 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 13741 if (!found && IS_G4X(dev_priv)) { 13742 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 13743 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 13744 } 13745 13746 if (!found && IS_G4X(dev_priv)) 13747 intel_dp_init(dev_priv, DP_B, PORT_B); 13748 } 13749 13750 /* Before G4X SDVOC doesn't have its own detect register */ 13751 13752 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 13753 DRM_DEBUG_KMS("probing SDVOC\n"); 13754 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 13755 } 13756 13757 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 13758 13759 if (IS_G4X(dev_priv)) { 13760 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 13761 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 13762 } 13763 if (IS_G4X(dev_priv)) 13764 intel_dp_init(dev_priv, DP_C, PORT_C); 13765 } 13766 13767 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 13768 intel_dp_init(dev_priv, DP_D, PORT_D); 13769 } else if (IS_GEN2(dev_priv)) 13770 intel_dvo_init(dev_priv); 13771 13772 if (SUPPORTS_TV(dev_priv)) 13773 intel_tv_init(dev_priv); 13774 13775 intel_psr_init(dev_priv); 13776 13777 for_each_intel_encoder(&dev_priv->drm, encoder) { 13778 encoder->base.possible_crtcs = encoder->crtc_mask; 13779 encoder->base.possible_clones = 13780 intel_encoder_clones(encoder); 13781 } 13782 13783 intel_init_pch_refclk(dev_priv); 13784 13785 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 13786 } 13787 13788 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 13789 { 13790 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 13791 13792 drm_framebuffer_cleanup(fb); 13793 13794 i915_gem_object_lock(intel_fb->obj); 13795 WARN_ON(!intel_fb->obj->framebuffer_references--); 13796 i915_gem_object_unlock(intel_fb->obj); 13797 13798 i915_gem_object_put(intel_fb->obj); 13799 13800 kfree(intel_fb); 13801 } 13802 13803 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 13804 struct drm_file *file, 13805 unsigned int *handle) 13806 { 13807 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 13808 struct drm_i915_gem_object *obj = intel_fb->obj; 13809 13810 if (obj->userptr.mm) { 13811 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 13812 return -EINVAL; 13813 } 13814 13815 return drm_gem_handle_create(file, &obj->base, handle); 13816 } 13817 13818 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 13819 struct drm_file *file, 13820 unsigned flags, unsigned color, 13821 struct drm_clip_rect *clips, 13822 unsigned num_clips) 13823 { 13824 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13825 13826 i915_gem_object_flush_if_display(obj); 13827 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 13828 13829 return 0; 13830 } 13831 13832 static const struct drm_framebuffer_funcs intel_fb_funcs = { 13833 .destroy = intel_user_framebuffer_destroy, 13834 .create_handle = intel_user_framebuffer_create_handle, 13835 .dirty = intel_user_framebuffer_dirty, 13836 }; 13837 13838 static 13839 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, 13840 uint64_t fb_modifier, uint32_t pixel_format) 13841 { 13842 u32 gen = INTEL_GEN(dev_priv); 13843 13844 if (gen >= 9) { 13845 int cpp = drm_format_plane_cpp(pixel_format, 0); 13846 13847 /* "The stride in bytes must not exceed the of the size of 8K 13848 * pixels and 32K bytes." 13849 */ 13850 return min(8192 * cpp, 32768); 13851 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) { 13852 return 32*1024; 13853 } else if (gen >= 4) { 13854 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 13855 return 16*1024; 13856 else 13857 return 32*1024; 13858 } else if (gen >= 3) { 13859 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 13860 return 8*1024; 13861 else 13862 return 16*1024; 13863 } else { 13864 /* XXX DSPC is limited to 4k tiled */ 13865 return 8*1024; 13866 } 13867 } 13868 13869 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 13870 struct drm_i915_gem_object *obj, 13871 struct drm_mode_fb_cmd2 *mode_cmd) 13872 { 13873 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 13874 struct drm_framebuffer *fb = &intel_fb->base; 13875 struct drm_format_name_buf format_name; 13876 u32 pitch_limit; 13877 unsigned int tiling, stride; 13878 int ret = -EINVAL; 13879 int i; 13880 13881 i915_gem_object_lock(obj); 13882 obj->framebuffer_references++; 13883 tiling = i915_gem_object_get_tiling(obj); 13884 stride = i915_gem_object_get_stride(obj); 13885 i915_gem_object_unlock(obj); 13886 13887 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 13888 /* 13889 * If there's a fence, enforce that 13890 * the fb modifier and tiling mode match. 13891 */ 13892 if (tiling != I915_TILING_NONE && 13893 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 13894 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 13895 goto err; 13896 } 13897 } else { 13898 if (tiling == I915_TILING_X) { 13899 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 13900 } else if (tiling == I915_TILING_Y) { 13901 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 13902 goto err; 13903 } 13904 } 13905 13906 /* Passed in modifier sanity checking. */ 13907 switch (mode_cmd->modifier[0]) { 13908 case I915_FORMAT_MOD_Y_TILED_CCS: 13909 case I915_FORMAT_MOD_Yf_TILED_CCS: 13910 switch (mode_cmd->pixel_format) { 13911 case DRM_FORMAT_XBGR8888: 13912 case DRM_FORMAT_ABGR8888: 13913 case DRM_FORMAT_XRGB8888: 13914 case DRM_FORMAT_ARGB8888: 13915 break; 13916 default: 13917 DRM_DEBUG_KMS("RC supported only with RGB8888 formats\n"); 13918 goto err; 13919 } 13920 /* fall through */ 13921 case I915_FORMAT_MOD_Y_TILED: 13922 case I915_FORMAT_MOD_Yf_TILED: 13923 if (INTEL_GEN(dev_priv) < 9) { 13924 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", 13925 mode_cmd->modifier[0]); 13926 goto err; 13927 } 13928 case DRM_FORMAT_MOD_LINEAR: 13929 case I915_FORMAT_MOD_X_TILED: 13930 break; 13931 default: 13932 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n", 13933 mode_cmd->modifier[0]); 13934 goto err; 13935 } 13936 13937 /* 13938 * gen2/3 display engine uses the fence if present, 13939 * so the tiling mode must match the fb modifier exactly. 13940 */ 13941 if (INTEL_INFO(dev_priv)->gen < 4 && 13942 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 13943 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 13944 goto err; 13945 } 13946 13947 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], 13948 mode_cmd->pixel_format); 13949 if (mode_cmd->pitches[0] > pitch_limit) { 13950 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 13951 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 13952 "tiled" : "linear", 13953 mode_cmd->pitches[0], pitch_limit); 13954 goto err; 13955 } 13956 13957 /* 13958 * If there's a fence, enforce that 13959 * the fb pitch and fence stride match. 13960 */ 13961 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 13962 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 13963 mode_cmd->pitches[0], stride); 13964 goto err; 13965 } 13966 13967 /* Reject formats not supported by any plane early. */ 13968 switch (mode_cmd->pixel_format) { 13969 case DRM_FORMAT_C8: 13970 case DRM_FORMAT_RGB565: 13971 case DRM_FORMAT_XRGB8888: 13972 case DRM_FORMAT_ARGB8888: 13973 break; 13974 case DRM_FORMAT_XRGB1555: 13975 if (INTEL_GEN(dev_priv) > 3) { 13976 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 13977 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 13978 goto err; 13979 } 13980 break; 13981 case DRM_FORMAT_ABGR8888: 13982 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 13983 INTEL_GEN(dev_priv) < 9) { 13984 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 13985 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 13986 goto err; 13987 } 13988 break; 13989 case DRM_FORMAT_XBGR8888: 13990 case DRM_FORMAT_XRGB2101010: 13991 case DRM_FORMAT_XBGR2101010: 13992 if (INTEL_GEN(dev_priv) < 4) { 13993 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 13994 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 13995 goto err; 13996 } 13997 break; 13998 case DRM_FORMAT_ABGR2101010: 13999 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 14000 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14001 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14002 goto err; 14003 } 14004 break; 14005 case DRM_FORMAT_YUYV: 14006 case DRM_FORMAT_UYVY: 14007 case DRM_FORMAT_YVYU: 14008 case DRM_FORMAT_VYUY: 14009 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) { 14010 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14011 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14012 goto err; 14013 } 14014 break; 14015 default: 14016 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14017 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14018 goto err; 14019 } 14020 14021 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 14022 if (mode_cmd->offsets[0] != 0) 14023 goto err; 14024 14025 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd); 14026 14027 for (i = 0; i < fb->format->num_planes; i++) { 14028 u32 stride_alignment; 14029 14030 if (mode_cmd->handles[i] != mode_cmd->handles[0]) { 14031 DRM_DEBUG_KMS("bad plane %d handle\n", i); 14032 goto err; 14033 } 14034 14035 stride_alignment = intel_fb_stride_alignment(fb, i); 14036 14037 /* 14038 * Display WA #0531: skl,bxt,kbl,glk 14039 * 14040 * Render decompression and plane width > 3840 14041 * combined with horizontal panning requires the 14042 * plane stride to be a multiple of 4. We'll just 14043 * require the entire fb to accommodate that to avoid 14044 * potential runtime errors at plane configuration time. 14045 */ 14046 if (IS_GEN9(dev_priv) && i == 0 && fb->width > 3840 && 14047 (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS || 14048 fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)) 14049 stride_alignment *= 4; 14050 14051 if (fb->pitches[i] & (stride_alignment - 1)) { 14052 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n", 14053 i, fb->pitches[i], stride_alignment); 14054 goto err; 14055 } 14056 } 14057 14058 intel_fb->obj = obj; 14059 14060 ret = intel_fill_fb_info(dev_priv, fb); 14061 if (ret) 14062 goto err; 14063 14064 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs); 14065 if (ret) { 14066 DRM_ERROR("framebuffer init failed %d\n", ret); 14067 goto err; 14068 } 14069 14070 return 0; 14071 14072 err: 14073 i915_gem_object_lock(obj); 14074 obj->framebuffer_references--; 14075 i915_gem_object_unlock(obj); 14076 return ret; 14077 } 14078 14079 static struct drm_framebuffer * 14080 intel_user_framebuffer_create(struct drm_device *dev, 14081 struct drm_file *filp, 14082 const struct drm_mode_fb_cmd2 *user_mode_cmd) 14083 { 14084 struct drm_framebuffer *fb; 14085 struct drm_i915_gem_object *obj; 14086 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14087 14088 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 14089 if (!obj) 14090 return ERR_PTR(-ENOENT); 14091 14092 fb = intel_framebuffer_create(obj, &mode_cmd); 14093 if (IS_ERR(fb)) 14094 i915_gem_object_put(obj); 14095 14096 return fb; 14097 } 14098 14099 static void intel_atomic_state_free(struct drm_atomic_state *state) 14100 { 14101 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 14102 14103 drm_atomic_state_default_release(state); 14104 14105 i915_sw_fence_fini(&intel_state->commit_ready); 14106 14107 kfree(state); 14108 } 14109 14110 static const struct drm_mode_config_funcs intel_mode_funcs = { 14111 .fb_create = intel_user_framebuffer_create, 14112 .get_format_info = intel_get_format_info, 14113 .output_poll_changed = intel_fbdev_output_poll_changed, 14114 .atomic_check = intel_atomic_check, 14115 .atomic_commit = intel_atomic_commit, 14116 .atomic_state_alloc = intel_atomic_state_alloc, 14117 .atomic_state_clear = intel_atomic_state_clear, 14118 .atomic_state_free = intel_atomic_state_free, 14119 }; 14120 14121 /** 14122 * intel_init_display_hooks - initialize the display modesetting hooks 14123 * @dev_priv: device private 14124 */ 14125 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 14126 { 14127 intel_init_cdclk_hooks(dev_priv); 14128 14129 if (INTEL_INFO(dev_priv)->gen >= 9) { 14130 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14131 dev_priv->display.get_initial_plane_config = 14132 skylake_get_initial_plane_config; 14133 dev_priv->display.crtc_compute_clock = 14134 haswell_crtc_compute_clock; 14135 dev_priv->display.crtc_enable = haswell_crtc_enable; 14136 dev_priv->display.crtc_disable = haswell_crtc_disable; 14137 } else if (HAS_DDI(dev_priv)) { 14138 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14139 dev_priv->display.get_initial_plane_config = 14140 ironlake_get_initial_plane_config; 14141 dev_priv->display.crtc_compute_clock = 14142 haswell_crtc_compute_clock; 14143 dev_priv->display.crtc_enable = haswell_crtc_enable; 14144 dev_priv->display.crtc_disable = haswell_crtc_disable; 14145 } else if (HAS_PCH_SPLIT(dev_priv)) { 14146 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14147 dev_priv->display.get_initial_plane_config = 14148 ironlake_get_initial_plane_config; 14149 dev_priv->display.crtc_compute_clock = 14150 ironlake_crtc_compute_clock; 14151 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14152 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14153 } else if (IS_CHERRYVIEW(dev_priv)) { 14154 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14155 dev_priv->display.get_initial_plane_config = 14156 i9xx_get_initial_plane_config; 14157 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 14158 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14159 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14160 } else if (IS_VALLEYVIEW(dev_priv)) { 14161 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14162 dev_priv->display.get_initial_plane_config = 14163 i9xx_get_initial_plane_config; 14164 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 14165 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14166 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14167 } else if (IS_G4X(dev_priv)) { 14168 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14169 dev_priv->display.get_initial_plane_config = 14170 i9xx_get_initial_plane_config; 14171 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 14172 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14173 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14174 } else if (IS_PINEVIEW(dev_priv)) { 14175 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14176 dev_priv->display.get_initial_plane_config = 14177 i9xx_get_initial_plane_config; 14178 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 14179 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14180 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14181 } else if (!IS_GEN2(dev_priv)) { 14182 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14183 dev_priv->display.get_initial_plane_config = 14184 i9xx_get_initial_plane_config; 14185 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14186 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14187 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14188 } else { 14189 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14190 dev_priv->display.get_initial_plane_config = 14191 i9xx_get_initial_plane_config; 14192 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 14193 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14194 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14195 } 14196 14197 if (IS_GEN5(dev_priv)) { 14198 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14199 } else if (IS_GEN6(dev_priv)) { 14200 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14201 } else if (IS_IVYBRIDGE(dev_priv)) { 14202 /* FIXME: detect B0+ stepping and use auto training */ 14203 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14204 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 14205 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14206 } 14207 14208 if (INTEL_GEN(dev_priv) >= 9) 14209 dev_priv->display.update_crtcs = skl_update_crtcs; 14210 else 14211 dev_priv->display.update_crtcs = intel_update_crtcs; 14212 } 14213 14214 /* 14215 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14216 */ 14217 static void quirk_ssc_force_disable(struct drm_device *dev) 14218 { 14219 struct drm_i915_private *dev_priv = to_i915(dev); 14220 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 14221 DRM_INFO("applying lvds SSC disable quirk\n"); 14222 } 14223 14224 /* 14225 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 14226 * brightness value 14227 */ 14228 static void quirk_invert_brightness(struct drm_device *dev) 14229 { 14230 struct drm_i915_private *dev_priv = to_i915(dev); 14231 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 14232 DRM_INFO("applying inverted panel brightness quirk\n"); 14233 } 14234 14235 /* Some VBT's incorrectly indicate no backlight is present */ 14236 static void quirk_backlight_present(struct drm_device *dev) 14237 { 14238 struct drm_i915_private *dev_priv = to_i915(dev); 14239 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 14240 DRM_INFO("applying backlight present quirk\n"); 14241 } 14242 14243 /* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms 14244 * which is 300 ms greater than eDP spec T12 min. 14245 */ 14246 static void quirk_increase_t12_delay(struct drm_device *dev) 14247 { 14248 struct drm_i915_private *dev_priv = to_i915(dev); 14249 14250 dev_priv->quirks |= QUIRK_INCREASE_T12_DELAY; 14251 DRM_INFO("Applying T12 delay quirk\n"); 14252 } 14253 14254 struct intel_quirk { 14255 int device; 14256 int subsystem_vendor; 14257 int subsystem_device; 14258 void (*hook)(struct drm_device *dev); 14259 }; 14260 14261 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 14262 struct intel_dmi_quirk { 14263 void (*hook)(struct drm_device *dev); 14264 const struct dmi_system_id (*dmi_id_list)[]; 14265 }; 14266 14267 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 14268 { 14269 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 14270 return 1; 14271 } 14272 14273 static const struct intel_dmi_quirk intel_dmi_quirks[] = { 14274 { 14275 .dmi_id_list = &(const struct dmi_system_id[]) { 14276 { 14277 .callback = intel_dmi_reverse_brightness, 14278 .ident = "NCR Corporation", 14279 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 14280 DMI_MATCH(DMI_PRODUCT_NAME, ""), 14281 }, 14282 }, 14283 { } /* terminating entry */ 14284 }, 14285 .hook = quirk_invert_brightness, 14286 }, 14287 }; 14288 14289 static struct intel_quirk intel_quirks[] = { 14290 /* Lenovo U160 cannot use SSC on LVDS */ 14291 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14292 14293 /* Sony Vaio Y cannot use SSC on LVDS */ 14294 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 14295 14296 /* Acer Aspire 5734Z must invert backlight brightness */ 14297 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 14298 14299 /* Acer/eMachines G725 */ 14300 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 14301 14302 /* Acer/eMachines e725 */ 14303 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 14304 14305 /* Acer/Packard Bell NCL20 */ 14306 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 14307 14308 /* Acer Aspire 4736Z */ 14309 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 14310 14311 /* Acer Aspire 5336 */ 14312 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 14313 14314 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ 14315 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 14316 14317 /* Acer C720 Chromebook (Core i3 4005U) */ 14318 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 14319 14320 /* Apple Macbook 2,1 (Core 2 T7400) */ 14321 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14322 14323 /* Apple Macbook 4,1 */ 14324 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, 14325 14326 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14327 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 14328 14329 /* HP Chromebook 14 (Celeron 2955U) */ 14330 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 14331 14332 /* Dell Chromebook 11 */ 14333 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14334 14335 /* Dell Chromebook 11 (2015 version) */ 14336 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, 14337 14338 /* Toshiba Satellite P50-C-18C */ 14339 { 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay }, 14340 }; 14341 14342 static void intel_init_quirks(struct drm_device *dev) 14343 { 14344 struct pci_dev *d = dev->pdev; 14345 int i; 14346 14347 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 14348 struct intel_quirk *q = &intel_quirks[i]; 14349 14350 if (d->device == q->device && 14351 (d->subsystem_vendor == q->subsystem_vendor || 14352 q->subsystem_vendor == PCI_ANY_ID) && 14353 (d->subsystem_device == q->subsystem_device || 14354 q->subsystem_device == PCI_ANY_ID)) 14355 q->hook(dev); 14356 } 14357 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 14358 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 14359 intel_dmi_quirks[i].hook(dev); 14360 } 14361 } 14362 14363 /* Disable the VGA plane that we never use */ 14364 static void i915_disable_vga(struct drm_i915_private *dev_priv) 14365 { 14366 struct pci_dev *pdev = dev_priv->drm.pdev; 14367 u8 sr1; 14368 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 14369 14370 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 14371 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 14372 outb(SR01, VGA_SR_INDEX); 14373 sr1 = inb(VGA_SR_DATA); 14374 outb(sr1 | 1<<5, VGA_SR_DATA); 14375 vga_put(pdev, VGA_RSRC_LEGACY_IO); 14376 udelay(300); 14377 14378 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 14379 POSTING_READ(vga_reg); 14380 } 14381 14382 void intel_modeset_init_hw(struct drm_device *dev) 14383 { 14384 struct drm_i915_private *dev_priv = to_i915(dev); 14385 14386 intel_update_cdclk(dev_priv); 14387 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; 14388 } 14389 14390 /* 14391 * Calculate what we think the watermarks should be for the state we've read 14392 * out of the hardware and then immediately program those watermarks so that 14393 * we ensure the hardware settings match our internal state. 14394 * 14395 * We can calculate what we think WM's should be by creating a duplicate of the 14396 * current state (which was constructed during hardware readout) and running it 14397 * through the atomic check code to calculate new watermark values in the 14398 * state object. 14399 */ 14400 static void sanitize_watermarks(struct drm_device *dev) 14401 { 14402 struct drm_i915_private *dev_priv = to_i915(dev); 14403 struct drm_atomic_state *state; 14404 struct intel_atomic_state *intel_state; 14405 struct drm_crtc *crtc; 14406 struct drm_crtc_state *cstate; 14407 struct drm_modeset_acquire_ctx ctx; 14408 int ret; 14409 int i; 14410 14411 /* Only supported on platforms that use atomic watermark design */ 14412 if (!dev_priv->display.optimize_watermarks) 14413 return; 14414 14415 /* 14416 * We need to hold connection_mutex before calling duplicate_state so 14417 * that the connector loop is protected. 14418 */ 14419 drm_modeset_acquire_init(&ctx, 0); 14420 retry: 14421 ret = drm_modeset_lock_all_ctx(dev, &ctx); 14422 if (ret == -EDEADLK) { 14423 drm_modeset_backoff(&ctx); 14424 goto retry; 14425 } else if (WARN_ON(ret)) { 14426 goto fail; 14427 } 14428 14429 state = drm_atomic_helper_duplicate_state(dev, &ctx); 14430 if (WARN_ON(IS_ERR(state))) 14431 goto fail; 14432 14433 intel_state = to_intel_atomic_state(state); 14434 14435 /* 14436 * Hardware readout is the only time we don't want to calculate 14437 * intermediate watermarks (since we don't trust the current 14438 * watermarks). 14439 */ 14440 if (!HAS_GMCH_DISPLAY(dev_priv)) 14441 intel_state->skip_intermediate_wm = true; 14442 14443 ret = intel_atomic_check(dev, state); 14444 if (ret) { 14445 /* 14446 * If we fail here, it means that the hardware appears to be 14447 * programmed in a way that shouldn't be possible, given our 14448 * understanding of watermark requirements. This might mean a 14449 * mistake in the hardware readout code or a mistake in the 14450 * watermark calculations for a given platform. Raise a WARN 14451 * so that this is noticeable. 14452 * 14453 * If this actually happens, we'll have to just leave the 14454 * BIOS-programmed watermarks untouched and hope for the best. 14455 */ 14456 WARN(true, "Could not determine valid watermarks for inherited state\n"); 14457 goto put_state; 14458 } 14459 14460 /* Write calculated watermark values back */ 14461 for_each_new_crtc_in_state(state, crtc, cstate, i) { 14462 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 14463 14464 cs->wm.need_postvbl_update = true; 14465 dev_priv->display.optimize_watermarks(intel_state, cs); 14466 14467 to_intel_crtc_state(crtc->state)->wm = cs->wm; 14468 } 14469 14470 put_state: 14471 drm_atomic_state_put(state); 14472 fail: 14473 drm_modeset_drop_locks(&ctx); 14474 drm_modeset_acquire_fini(&ctx); 14475 } 14476 14477 int intel_modeset_init(struct drm_device *dev) 14478 { 14479 struct drm_i915_private *dev_priv = to_i915(dev); 14480 struct i915_ggtt *ggtt = &dev_priv->ggtt; 14481 enum i915_pipe pipe; 14482 struct intel_crtc *crtc; 14483 14484 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); 14485 14486 drm_mode_config_init(dev); 14487 14488 dev->mode_config.min_width = 0; 14489 dev->mode_config.min_height = 0; 14490 14491 dev->mode_config.preferred_depth = 24; 14492 dev->mode_config.prefer_shadow = 1; 14493 14494 dev->mode_config.allow_fb_modifiers = true; 14495 14496 dev->mode_config.funcs = &intel_mode_funcs; 14497 14498 init_llist_head(&dev_priv->atomic_helper.free_list); 14499 INIT_WORK(&dev_priv->atomic_helper.free_work, 14500 intel_atomic_helper_free_state_worker); 14501 14502 intel_init_quirks(dev); 14503 14504 intel_init_pm(dev_priv); 14505 14506 if (INTEL_INFO(dev_priv)->num_pipes == 0) 14507 return 0; 14508 14509 /* 14510 * There may be no VBT; and if the BIOS enabled SSC we can 14511 * just keep using it to avoid unnecessary flicker. Whereas if the 14512 * BIOS isn't using it, don't assume it will work even if the VBT 14513 * indicates as much. 14514 */ 14515 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 14516 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 14517 DREF_SSC1_ENABLE); 14518 14519 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 14520 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 14521 bios_lvds_use_ssc ? "en" : "dis", 14522 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 14523 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 14524 } 14525 } 14526 14527 if (IS_GEN2(dev_priv)) { 14528 dev->mode_config.max_width = 2048; 14529 dev->mode_config.max_height = 2048; 14530 } else if (IS_GEN3(dev_priv)) { 14531 dev->mode_config.max_width = 4096; 14532 dev->mode_config.max_height = 4096; 14533 } else { 14534 dev->mode_config.max_width = 8192; 14535 dev->mode_config.max_height = 8192; 14536 } 14537 14538 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 14539 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 14540 dev->mode_config.cursor_height = 1023; 14541 } else if (IS_GEN2(dev_priv)) { 14542 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 14543 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 14544 } else { 14545 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 14546 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 14547 } 14548 14549 dev->mode_config.fb_base = ggtt->mappable_base; 14550 14551 DRM_DEBUG_KMS("%d display pipe%s available.\n", 14552 INTEL_INFO(dev_priv)->num_pipes, 14553 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); 14554 14555 for_each_pipe(dev_priv, pipe) { 14556 int ret; 14557 14558 ret = intel_crtc_init(dev_priv, pipe); 14559 if (ret) { 14560 drm_mode_config_cleanup(dev); 14561 return ret; 14562 } 14563 } 14564 14565 intel_shared_dpll_init(dev); 14566 14567 intel_update_czclk(dev_priv); 14568 intel_modeset_init_hw(dev); 14569 14570 if (dev_priv->max_cdclk_freq == 0) 14571 intel_update_max_cdclk(dev_priv); 14572 14573 /* Just disable it once at startup */ 14574 i915_disable_vga(dev_priv); 14575 intel_setup_outputs(dev_priv); 14576 14577 drm_modeset_lock_all(dev); 14578 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 14579 drm_modeset_unlock_all(dev); 14580 14581 for_each_intel_crtc(dev, crtc) { 14582 struct intel_initial_plane_config plane_config = {}; 14583 14584 if (!crtc->active) 14585 continue; 14586 14587 /* 14588 * Note that reserving the BIOS fb up front prevents us 14589 * from stuffing other stolen allocations like the ring 14590 * on top. This prevents some ugliness at boot time, and 14591 * can even allow for smooth boot transitions if the BIOS 14592 * fb is large enough for the active pipe configuration. 14593 */ 14594 dev_priv->display.get_initial_plane_config(crtc, 14595 &plane_config); 14596 14597 /* 14598 * If the fb is shared between multiple heads, we'll 14599 * just get the first one. 14600 */ 14601 intel_find_initial_plane_obj(crtc, &plane_config); 14602 } 14603 14604 /* 14605 * Make sure hardware watermarks really match the state we read out. 14606 * Note that we need to do this after reconstructing the BIOS fb's 14607 * since the watermark calculation done here will use pstate->fb. 14608 */ 14609 if (!HAS_GMCH_DISPLAY(dev_priv)) 14610 sanitize_watermarks(dev); 14611 14612 return 0; 14613 } 14614 14615 void i830_enable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 14616 { 14617 /* 640x480@60Hz, ~25175 kHz */ 14618 struct dpll clock = { 14619 .m1 = 18, 14620 .m2 = 7, 14621 .p1 = 13, 14622 .p2 = 4, 14623 .n = 2, 14624 }; 14625 u32 dpll, fp; 14626 int i; 14627 14628 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154); 14629 14630 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n", 14631 pipe_name(pipe), clock.vco, clock.dot); 14632 14633 fp = i9xx_dpll_compute_fp(&clock); 14634 dpll = (I915_READ(DPLL(pipe)) & DPLL_DVO_2X_MODE) | 14635 DPLL_VGA_MODE_DIS | 14636 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) | 14637 PLL_P2_DIVIDE_BY_4 | 14638 PLL_REF_INPUT_DREFCLK | 14639 DPLL_VCO_ENABLE; 14640 14641 I915_WRITE(FP0(pipe), fp); 14642 I915_WRITE(FP1(pipe), fp); 14643 14644 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16)); 14645 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16)); 14646 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16)); 14647 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16)); 14648 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16)); 14649 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16)); 14650 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1)); 14651 14652 /* 14653 * Apparently we need to have VGA mode enabled prior to changing 14654 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 14655 * dividers, even though the register value does change. 14656 */ 14657 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS); 14658 I915_WRITE(DPLL(pipe), dpll); 14659 14660 /* Wait for the clocks to stabilize. */ 14661 POSTING_READ(DPLL(pipe)); 14662 udelay(150); 14663 14664 /* The pixel multiplier can only be updated once the 14665 * DPLL is enabled and the clocks are stable. 14666 * 14667 * So write it again. 14668 */ 14669 I915_WRITE(DPLL(pipe), dpll); 14670 14671 /* We do this three times for luck */ 14672 for (i = 0; i < 3 ; i++) { 14673 I915_WRITE(DPLL(pipe), dpll); 14674 POSTING_READ(DPLL(pipe)); 14675 udelay(150); /* wait for warmup */ 14676 } 14677 14678 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); 14679 POSTING_READ(PIPECONF(pipe)); 14680 } 14681 14682 void i830_disable_pipe(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 14683 { 14684 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 14685 14686 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n", 14687 pipe_name(pipe)); 14688 14689 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE); 14690 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE); 14691 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); 14692 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & CURSOR_MODE); 14693 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & CURSOR_MODE); 14694 14695 I915_WRITE(PIPECONF(pipe), 0); 14696 POSTING_READ(PIPECONF(pipe)); 14697 14698 intel_wait_for_pipe_scanline_stopped(crtc); 14699 14700 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 14701 POSTING_READ(DPLL(pipe)); 14702 } 14703 14704 static bool intel_plane_mapping_ok(struct intel_crtc *crtc, 14705 struct intel_plane *primary) 14706 { 14707 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14708 enum plane plane = primary->plane; 14709 u32 val = I915_READ(DSPCNTR(plane)); 14710 14711 return (val & DISPLAY_PLANE_ENABLE) == 0 || 14712 (val & DISPPLANE_SEL_PIPE_MASK) == DISPPLANE_SEL_PIPE(crtc->pipe); 14713 } 14714 14715 static void 14716 intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv) 14717 { 14718 struct intel_crtc *crtc; 14719 14720 if (INTEL_GEN(dev_priv) >= 4) 14721 return; 14722 14723 for_each_intel_crtc(&dev_priv->drm, crtc) { 14724 struct intel_plane *plane = 14725 to_intel_plane(crtc->base.primary); 14726 14727 if (intel_plane_mapping_ok(crtc, plane)) 14728 continue; 14729 14730 DRM_DEBUG_KMS("%s attached to the wrong pipe, disabling plane\n", 14731 plane->base.name); 14732 intel_plane_disable_noatomic(crtc, plane); 14733 } 14734 } 14735 14736 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 14737 { 14738 struct drm_device *dev = crtc->base.dev; 14739 struct intel_encoder *encoder; 14740 14741 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 14742 return true; 14743 14744 return false; 14745 } 14746 14747 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 14748 { 14749 struct drm_device *dev = encoder->base.dev; 14750 struct intel_connector *connector; 14751 14752 for_each_connector_on_encoder(dev, &encoder->base, connector) 14753 return connector; 14754 14755 return NULL; 14756 } 14757 14758 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 14759 enum i915_pipe pch_transcoder) 14760 { 14761 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 14762 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A); 14763 } 14764 14765 static void intel_sanitize_crtc(struct intel_crtc *crtc, 14766 struct drm_modeset_acquire_ctx *ctx) 14767 { 14768 struct drm_device *dev = crtc->base.dev; 14769 struct drm_i915_private *dev_priv = to_i915(dev); 14770 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 14771 14772 /* Clear any frame start delays used for debugging left by the BIOS */ 14773 if (!transcoder_is_dsi(cpu_transcoder)) { 14774 i915_reg_t reg = PIPECONF(cpu_transcoder); 14775 14776 I915_WRITE(reg, 14777 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 14778 } 14779 14780 /* restore vblank interrupts to correct state */ 14781 drm_crtc_vblank_reset(&crtc->base); 14782 if (crtc->active) { 14783 struct intel_plane *plane; 14784 14785 drm_crtc_vblank_on(&crtc->base); 14786 14787 /* Disable everything but the primary plane */ 14788 for_each_intel_plane_on_crtc(dev, crtc, plane) { 14789 const struct intel_plane_state *plane_state = 14790 to_intel_plane_state(plane->base.state); 14791 14792 if (plane_state->base.visible && 14793 plane->base.type != DRM_PLANE_TYPE_PRIMARY) 14794 intel_plane_disable_noatomic(crtc, plane); 14795 } 14796 } 14797 14798 /* Adjust the state of the output pipe according to whether we 14799 * have active connectors/encoders. */ 14800 if (crtc->active && !intel_crtc_has_encoders(crtc)) 14801 intel_crtc_disable_noatomic(&crtc->base, ctx); 14802 14803 if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) { 14804 /* 14805 * We start out with underrun reporting disabled to avoid races. 14806 * For correct bookkeeping mark this on active crtcs. 14807 * 14808 * Also on gmch platforms we dont have any hardware bits to 14809 * disable the underrun reporting. Which means we need to start 14810 * out with underrun reporting disabled also on inactive pipes, 14811 * since otherwise we'll complain about the garbage we read when 14812 * e.g. coming up after runtime pm. 14813 * 14814 * No protection against concurrent access is required - at 14815 * worst a fifo underrun happens which also sets this to false. 14816 */ 14817 crtc->cpu_fifo_underrun_disabled = true; 14818 /* 14819 * We track the PCH trancoder underrun reporting state 14820 * within the crtc. With crtc for pipe A housing the underrun 14821 * reporting state for PCH transcoder A, crtc for pipe B housing 14822 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A, 14823 * and marking underrun reporting as disabled for the non-existing 14824 * PCH transcoders B and C would prevent enabling the south 14825 * error interrupt (see cpt_can_enable_serr_int()). 14826 */ 14827 if (has_pch_trancoder(dev_priv, crtc->pipe)) 14828 crtc->pch_fifo_underrun_disabled = true; 14829 } 14830 } 14831 14832 static void intel_sanitize_encoder(struct intel_encoder *encoder) 14833 { 14834 struct intel_connector *connector; 14835 14836 /* We need to check both for a crtc link (meaning that the 14837 * encoder is active and trying to read from a pipe) and the 14838 * pipe itself being active. */ 14839 bool has_active_crtc = encoder->base.crtc && 14840 to_intel_crtc(encoder->base.crtc)->active; 14841 14842 connector = intel_encoder_find_connector(encoder); 14843 if (connector && !has_active_crtc) { 14844 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 14845 encoder->base.base.id, 14846 encoder->base.name); 14847 14848 /* Connector is active, but has no active pipe. This is 14849 * fallout from our resume register restoring. Disable 14850 * the encoder manually again. */ 14851 if (encoder->base.crtc) { 14852 struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 14853 14854 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 14855 encoder->base.base.id, 14856 encoder->base.name); 14857 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 14858 if (encoder->post_disable) 14859 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 14860 } 14861 encoder->base.crtc = NULL; 14862 14863 /* Inconsistent output/port/pipe state happens presumably due to 14864 * a bug in one of the get_hw_state functions. Or someplace else 14865 * in our code, like the register restore mess on resume. Clamp 14866 * things to off as a safer default. */ 14867 14868 connector->base.dpms = DRM_MODE_DPMS_OFF; 14869 connector->base.encoder = NULL; 14870 } 14871 /* Enabled encoders without active connectors will be fixed in 14872 * the crtc fixup. */ 14873 } 14874 14875 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 14876 { 14877 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 14878 14879 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 14880 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 14881 i915_disable_vga(dev_priv); 14882 } 14883 } 14884 14885 void i915_redisable_vga(struct drm_i915_private *dev_priv) 14886 { 14887 /* This function can be called both from intel_modeset_setup_hw_state or 14888 * at a very early point in our resume sequence, where the power well 14889 * structures are not yet restored. Since this function is at a very 14890 * paranoid "someone might have enabled VGA while we were not looking" 14891 * level, just check if the power well is enabled instead of trying to 14892 * follow the "don't touch the power well if we don't need it" policy 14893 * the rest of the driver uses. */ 14894 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) 14895 return; 14896 14897 i915_redisable_vga_power_on(dev_priv); 14898 14899 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 14900 } 14901 14902 /* FIXME read out full plane state for all planes */ 14903 static void readout_plane_state(struct intel_crtc *crtc) 14904 { 14905 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 14906 struct intel_crtc_state *crtc_state = 14907 to_intel_crtc_state(crtc->base.state); 14908 struct intel_plane *plane; 14909 14910 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { 14911 struct intel_plane_state *plane_state = 14912 to_intel_plane_state(plane->base.state); 14913 bool visible = plane->get_hw_state(plane); 14914 14915 intel_set_plane_visible(crtc_state, plane_state, visible); 14916 } 14917 } 14918 14919 static void intel_modeset_readout_hw_state(struct drm_device *dev) 14920 { 14921 struct drm_i915_private *dev_priv = to_i915(dev); 14922 enum i915_pipe pipe; 14923 struct intel_crtc *crtc; 14924 struct intel_encoder *encoder; 14925 struct intel_connector *connector; 14926 struct drm_connector_list_iter conn_iter; 14927 int i; 14928 14929 dev_priv->active_crtcs = 0; 14930 14931 for_each_intel_crtc(dev, crtc) { 14932 struct intel_crtc_state *crtc_state = 14933 to_intel_crtc_state(crtc->base.state); 14934 14935 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 14936 memset(crtc_state, 0, sizeof(*crtc_state)); 14937 crtc_state->base.crtc = &crtc->base; 14938 14939 crtc_state->base.active = crtc_state->base.enable = 14940 dev_priv->display.get_pipe_config(crtc, crtc_state); 14941 14942 crtc->base.enabled = crtc_state->base.enable; 14943 crtc->active = crtc_state->base.active; 14944 14945 if (crtc_state->base.active) 14946 dev_priv->active_crtcs |= 1 << crtc->pipe; 14947 14948 readout_plane_state(crtc); 14949 14950 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 14951 crtc->base.base.id, crtc->base.name, 14952 enableddisabled(crtc_state->base.active)); 14953 } 14954 14955 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 14956 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 14957 14958 pll->on = pll->funcs.get_hw_state(dev_priv, pll, 14959 &pll->state.hw_state); 14960 pll->state.crtc_mask = 0; 14961 for_each_intel_crtc(dev, crtc) { 14962 struct intel_crtc_state *crtc_state = 14963 to_intel_crtc_state(crtc->base.state); 14964 14965 if (crtc_state->base.active && 14966 crtc_state->shared_dpll == pll) 14967 pll->state.crtc_mask |= 1 << crtc->pipe; 14968 } 14969 pll->active_mask = pll->state.crtc_mask; 14970 14971 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 14972 pll->name, pll->state.crtc_mask, pll->on); 14973 } 14974 14975 for_each_intel_encoder(dev, encoder) { 14976 pipe = 0; 14977 14978 if (encoder->get_hw_state(encoder, &pipe)) { 14979 struct intel_crtc_state *crtc_state; 14980 14981 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 14982 crtc_state = to_intel_crtc_state(crtc->base.state); 14983 14984 encoder->base.crtc = &crtc->base; 14985 crtc_state->output_types |= 1 << encoder->type; 14986 encoder->get_config(encoder, crtc_state); 14987 } else { 14988 encoder->base.crtc = NULL; 14989 } 14990 14991 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 14992 encoder->base.base.id, encoder->base.name, 14993 enableddisabled(encoder->base.crtc), 14994 pipe_name(pipe)); 14995 } 14996 14997 drm_connector_list_iter_begin(dev, &conn_iter); 14998 for_each_intel_connector_iter(connector, &conn_iter) { 14999 if (connector->get_hw_state(connector)) { 15000 connector->base.dpms = DRM_MODE_DPMS_ON; 15001 15002 encoder = connector->encoder; 15003 connector->base.encoder = &encoder->base; 15004 15005 if (encoder->base.crtc && 15006 encoder->base.crtc->state->active) { 15007 /* 15008 * This has to be done during hardware readout 15009 * because anything calling .crtc_disable may 15010 * rely on the connector_mask being accurate. 15011 */ 15012 encoder->base.crtc->state->connector_mask |= 15013 1 << drm_connector_index(&connector->base); 15014 encoder->base.crtc->state->encoder_mask |= 15015 1 << drm_encoder_index(&encoder->base); 15016 } 15017 15018 } else { 15019 connector->base.dpms = DRM_MODE_DPMS_OFF; 15020 connector->base.encoder = NULL; 15021 } 15022 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15023 connector->base.base.id, connector->base.name, 15024 enableddisabled(connector->base.encoder)); 15025 } 15026 drm_connector_list_iter_end(&conn_iter); 15027 15028 for_each_intel_crtc(dev, crtc) { 15029 struct intel_crtc_state *crtc_state = 15030 to_intel_crtc_state(crtc->base.state); 15031 int min_cdclk = 0; 15032 15033 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15034 if (crtc_state->base.active) { 15035 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 15036 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 15037 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15038 15039 /* 15040 * The initial mode needs to be set in order to keep 15041 * the atomic core happy. It wants a valid mode if the 15042 * crtc's enabled, so we do the above call. 15043 * 15044 * But we don't set all the derived state fully, hence 15045 * set a flag to indicate that a full recalculation is 15046 * needed on the next commit. 15047 */ 15048 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED; 15049 15050 intel_crtc_compute_pixel_rate(crtc_state); 15051 15052 if (dev_priv->display.modeset_calc_cdclk) { 15053 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); 15054 if (WARN_ON(min_cdclk < 0)) 15055 min_cdclk = 0; 15056 } 15057 15058 drm_calc_timestamping_constants(&crtc->base, 15059 &crtc_state->base.adjusted_mode); 15060 update_scanline_offset(crtc); 15061 } 15062 15063 dev_priv->min_cdclk[crtc->pipe] = min_cdclk; 15064 15065 intel_pipe_config_sanity_check(dev_priv, crtc_state); 15066 } 15067 } 15068 15069 static void 15070 get_encoder_power_domains(struct drm_i915_private *dev_priv) 15071 { 15072 struct intel_encoder *encoder; 15073 15074 for_each_intel_encoder(&dev_priv->drm, encoder) { 15075 u64 get_domains; 15076 enum intel_display_power_domain domain; 15077 15078 if (!encoder->get_power_domains) 15079 continue; 15080 15081 get_domains = encoder->get_power_domains(encoder); 15082 for_each_power_domain(domain, get_domains) 15083 intel_display_power_get(dev_priv, domain); 15084 } 15085 } 15086 15087 /* Scan out the current hw modeset state, 15088 * and sanitizes it to the current state 15089 */ 15090 static void 15091 intel_modeset_setup_hw_state(struct drm_device *dev, 15092 struct drm_modeset_acquire_ctx *ctx) 15093 { 15094 struct drm_i915_private *dev_priv = to_i915(dev); 15095 enum i915_pipe pipe; 15096 struct intel_crtc *crtc; 15097 struct intel_encoder *encoder; 15098 int i; 15099 15100 if (IS_HASWELL(dev_priv)) { 15101 /* 15102 * WaRsPkgCStateDisplayPMReq:hsw 15103 * System hang if this isn't done before disabling all planes! 15104 */ 15105 I915_WRITE(CHICKEN_PAR1_1, 15106 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES); 15107 } 15108 15109 intel_modeset_readout_hw_state(dev); 15110 15111 /* HW state is read out, now we need to sanitize this mess. */ 15112 get_encoder_power_domains(dev_priv); 15113 15114 intel_sanitize_plane_mapping(dev_priv); 15115 15116 for_each_intel_encoder(dev, encoder) { 15117 intel_sanitize_encoder(encoder); 15118 } 15119 15120 for_each_pipe(dev_priv, pipe) { 15121 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15122 15123 intel_sanitize_crtc(crtc, ctx); 15124 intel_dump_pipe_config(crtc, crtc->config, 15125 "[setup_hw_state]"); 15126 } 15127 15128 intel_modeset_update_connector_atomic_state(dev); 15129 15130 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15131 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15132 15133 if (!pll->on || pll->active_mask) 15134 continue; 15135 15136 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 15137 15138 pll->funcs.disable(dev_priv, pll); 15139 pll->on = false; 15140 } 15141 15142 if (IS_G4X(dev_priv)) { 15143 g4x_wm_get_hw_state(dev); 15144 g4x_wm_sanitize(dev_priv); 15145 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15146 vlv_wm_get_hw_state(dev); 15147 vlv_wm_sanitize(dev_priv); 15148 } else if (INTEL_GEN(dev_priv) >= 9) { 15149 skl_wm_get_hw_state(dev); 15150 } else if (HAS_PCH_SPLIT(dev_priv)) { 15151 ilk_wm_get_hw_state(dev); 15152 } 15153 15154 for_each_intel_crtc(dev, crtc) { 15155 u64 put_domains; 15156 15157 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 15158 if (WARN_ON(put_domains)) 15159 modeset_put_power_domains(dev_priv, put_domains); 15160 } 15161 intel_display_set_init_power(dev_priv, false); 15162 15163 intel_power_domains_verify_state(dev_priv); 15164 15165 intel_fbc_init_pipe_state(dev_priv); 15166 } 15167 15168 void intel_display_resume(struct drm_device *dev) 15169 { 15170 struct drm_i915_private *dev_priv = to_i915(dev); 15171 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 15172 struct drm_modeset_acquire_ctx ctx; 15173 int ret; 15174 15175 dev_priv->modeset_restore_state = NULL; 15176 if (state) 15177 state->acquire_ctx = &ctx; 15178 15179 drm_modeset_acquire_init(&ctx, 0); 15180 15181 while (1) { 15182 ret = drm_modeset_lock_all_ctx(dev, &ctx); 15183 if (ret != -EDEADLK) 15184 break; 15185 15186 drm_modeset_backoff(&ctx); 15187 } 15188 15189 if (!ret) 15190 ret = __intel_display_resume(dev, state, &ctx); 15191 15192 intel_enable_ipc(dev_priv); 15193 drm_modeset_drop_locks(&ctx); 15194 drm_modeset_acquire_fini(&ctx); 15195 15196 if (ret) 15197 DRM_ERROR("Restoring old state failed with %i\n", ret); 15198 if (state) 15199 drm_atomic_state_put(state); 15200 } 15201 15202 void intel_modeset_gem_init(struct drm_device *dev) 15203 { 15204 struct drm_i915_private *dev_priv = to_i915(dev); 15205 15206 intel_init_gt_powersave(dev_priv); 15207 15208 intel_init_clock_gating(dev_priv); 15209 15210 intel_setup_overlay(dev_priv); 15211 } 15212 15213 int intel_connector_register(struct drm_connector *connector) 15214 { 15215 struct intel_connector *intel_connector = to_intel_connector(connector); 15216 int ret; 15217 15218 ret = intel_backlight_device_register(intel_connector); 15219 if (ret) 15220 goto err; 15221 15222 return 0; 15223 15224 err: 15225 return ret; 15226 } 15227 15228 void intel_connector_unregister(struct drm_connector *connector) 15229 { 15230 struct intel_connector *intel_connector = to_intel_connector(connector); 15231 15232 intel_backlight_device_unregister(intel_connector); 15233 intel_panel_destroy_backlight(connector); 15234 } 15235 15236 static void intel_hpd_poll_fini(struct drm_device *dev) 15237 { 15238 struct intel_connector *connector; 15239 struct drm_connector_list_iter conn_iter; 15240 15241 /* First disable polling... */ 15242 drm_kms_helper_poll_fini(dev); 15243 15244 /* Then kill the work that may have been queued by hpd. */ 15245 drm_connector_list_iter_begin(dev, &conn_iter); 15246 for_each_intel_connector_iter(connector, &conn_iter) { 15247 if (connector->modeset_retry_work.func) 15248 cancel_work_sync(&connector->modeset_retry_work); 15249 } 15250 drm_connector_list_iter_end(&conn_iter); 15251 } 15252 15253 void intel_modeset_cleanup(struct drm_device *dev) 15254 { 15255 struct drm_i915_private *dev_priv = to_i915(dev); 15256 15257 flush_work(&dev_priv->atomic_helper.free_work); 15258 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); 15259 15260 intel_disable_gt_powersave(dev_priv); 15261 15262 /* 15263 * Interrupts and polling as the first thing to avoid creating havoc. 15264 * Too much stuff here (turning of connectors, ...) would 15265 * experience fancy races otherwise. 15266 */ 15267 intel_irq_uninstall(dev_priv); 15268 15269 /* 15270 * Due to the hpd irq storm handling the hotplug work can re-arm the 15271 * poll handlers. Hence disable polling after hpd handling is shut down. 15272 */ 15273 intel_hpd_poll_fini(dev); 15274 15275 /* poll work can call into fbdev, hence clean that up afterwards */ 15276 intel_fbdev_fini(dev_priv); 15277 15278 intel_unregister_dsm_handler(); 15279 15280 intel_fbc_global_disable(dev_priv); 15281 15282 /* flush any delayed tasks or pending work */ 15283 flush_scheduled_work(); 15284 15285 drm_mode_config_cleanup(dev); 15286 15287 intel_cleanup_overlay(dev_priv); 15288 15289 intel_cleanup_gt_powersave(dev_priv); 15290 15291 intel_teardown_gmbus(dev_priv); 15292 15293 destroy_workqueue(dev_priv->modeset_wq); 15294 } 15295 15296 void intel_connector_attach_encoder(struct intel_connector *connector, 15297 struct intel_encoder *encoder) 15298 { 15299 connector->encoder = encoder; 15300 drm_mode_connector_attach_encoder(&connector->base, 15301 &encoder->base); 15302 } 15303 15304 /* 15305 * set vga decode state - true == enable VGA decode 15306 */ 15307 int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state) 15308 { 15309 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15310 u16 gmch_ctrl; 15311 15312 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 15313 DRM_ERROR("failed to read control word\n"); 15314 return -EIO; 15315 } 15316 15317 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 15318 return 0; 15319 15320 if (state) 15321 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 15322 else 15323 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 15324 15325 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 15326 DRM_ERROR("failed to write control word\n"); 15327 return -EIO; 15328 } 15329 15330 return 0; 15331 } 15332 15333 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 15334 15335 struct intel_display_error_state { 15336 15337 u32 power_well_driver; 15338 15339 int num_transcoders; 15340 15341 struct intel_cursor_error_state { 15342 u32 control; 15343 u32 position; 15344 u32 base; 15345 u32 size; 15346 } cursor[I915_MAX_PIPES]; 15347 15348 struct intel_pipe_error_state { 15349 bool power_domain_on; 15350 u32 source; 15351 u32 stat; 15352 } pipe[I915_MAX_PIPES]; 15353 15354 struct intel_plane_error_state { 15355 u32 control; 15356 u32 stride; 15357 u32 size; 15358 u32 pos; 15359 u32 addr; 15360 u32 surface; 15361 u32 tile_offset; 15362 } plane[I915_MAX_PIPES]; 15363 15364 struct intel_transcoder_error_state { 15365 bool power_domain_on; 15366 enum transcoder cpu_transcoder; 15367 15368 u32 conf; 15369 15370 u32 htotal; 15371 u32 hblank; 15372 u32 hsync; 15373 u32 vtotal; 15374 u32 vblank; 15375 u32 vsync; 15376 } transcoder[4]; 15377 }; 15378 15379 struct intel_display_error_state * 15380 intel_display_capture_error_state(struct drm_i915_private *dev_priv) 15381 { 15382 struct intel_display_error_state *error; 15383 int transcoders[] = { 15384 TRANSCODER_A, 15385 TRANSCODER_B, 15386 TRANSCODER_C, 15387 TRANSCODER_EDP, 15388 }; 15389 int i; 15390 15391 if (INTEL_INFO(dev_priv)->num_pipes == 0) 15392 return NULL; 15393 15394 error = kzalloc(sizeof(*error), GFP_ATOMIC); 15395 if (error == NULL) 15396 return NULL; 15397 15398 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15399 error->power_well_driver = 15400 I915_READ(HSW_PWR_WELL_CTL_DRIVER(HSW_DISP_PW_GLOBAL)); 15401 15402 for_each_pipe(dev_priv, i) { 15403 error->pipe[i].power_domain_on = 15404 __intel_display_power_is_enabled(dev_priv, 15405 POWER_DOMAIN_PIPE(i)); 15406 if (!error->pipe[i].power_domain_on) 15407 continue; 15408 15409 error->cursor[i].control = I915_READ(CURCNTR(i)); 15410 error->cursor[i].position = I915_READ(CURPOS(i)); 15411 error->cursor[i].base = I915_READ(CURBASE(i)); 15412 15413 error->plane[i].control = I915_READ(DSPCNTR(i)); 15414 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15415 if (INTEL_GEN(dev_priv) <= 3) { 15416 error->plane[i].size = I915_READ(DSPSIZE(i)); 15417 error->plane[i].pos = I915_READ(DSPPOS(i)); 15418 } 15419 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15420 error->plane[i].addr = I915_READ(DSPADDR(i)); 15421 if (INTEL_GEN(dev_priv) >= 4) { 15422 error->plane[i].surface = I915_READ(DSPSURF(i)); 15423 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15424 } 15425 15426 error->pipe[i].source = I915_READ(PIPESRC(i)); 15427 15428 if (HAS_GMCH_DISPLAY(dev_priv)) 15429 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15430 } 15431 15432 /* Note: this does not include DSI transcoders. */ 15433 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; 15434 if (HAS_DDI(dev_priv)) 15435 error->num_transcoders++; /* Account for eDP. */ 15436 15437 for (i = 0; i < error->num_transcoders; i++) { 15438 enum transcoder cpu_transcoder = transcoders[i]; 15439 15440 error->transcoder[i].power_domain_on = 15441 __intel_display_power_is_enabled(dev_priv, 15442 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15443 if (!error->transcoder[i].power_domain_on) 15444 continue; 15445 15446 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15447 15448 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15449 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15450 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15451 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15452 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15453 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15454 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15455 } 15456 15457 return error; 15458 } 15459 15460 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15461 15462 void 15463 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15464 struct intel_display_error_state *error) 15465 { 15466 struct drm_i915_private *dev_priv = m->i915; 15467 int i; 15468 15469 if (!error) 15470 return; 15471 15472 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); 15473 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15474 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15475 error->power_well_driver); 15476 for_each_pipe(dev_priv, i) { 15477 err_printf(m, "Pipe [%d]:\n", i); 15478 err_printf(m, " Power: %s\n", 15479 onoff(error->pipe[i].power_domain_on)); 15480 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15481 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15482 15483 err_printf(m, "Plane [%d]:\n", i); 15484 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15485 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15486 if (INTEL_GEN(dev_priv) <= 3) { 15487 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15488 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15489 } 15490 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15491 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15492 if (INTEL_GEN(dev_priv) >= 4) { 15493 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15494 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15495 } 15496 15497 err_printf(m, "Cursor [%d]:\n", i); 15498 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15499 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15500 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15501 } 15502 15503 for (i = 0; i < error->num_transcoders; i++) { 15504 err_printf(m, "CPU transcoder: %s\n", 15505 transcoder_name(error->transcoder[i].cpu_transcoder)); 15506 err_printf(m, " Power: %s\n", 15507 onoff(error->transcoder[i].power_domain_on)); 15508 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15509 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15510 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15511 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15512 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15513 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15514 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15515 } 15516 } 15517 15518 #endif 15519