/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct intel_framebuffer *ifb,
				  struct drm_i915_gem_object *obj,
				  struct drm_mode_fb_cmd2 *mode_cmd);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void
intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 107 struct intel_link_m_n *m_n, 108 struct intel_link_m_n *m2_n2); 109 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 110 static void haswell_set_pipeconf(struct drm_crtc *crtc); 111 static void haswell_set_pipemisc(struct drm_crtc *crtc); 112 static void vlv_prepare_pll(struct intel_crtc *crtc, 113 const struct intel_crtc_state *pipe_config); 114 static void chv_prepare_pll(struct intel_crtc *crtc, 115 const struct intel_crtc_state *pipe_config); 116 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 117 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 118 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 119 struct intel_crtc_state *crtc_state); 120 static void skylake_pfit_enable(struct intel_crtc *crtc); 121 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 122 static void ironlake_pfit_enable(struct intel_crtc *crtc); 123 static void intel_modeset_setup_hw_state(struct drm_device *dev, 124 struct drm_modeset_acquire_ctx *ctx); 125 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc); 126 127 struct intel_limit { 128 struct { 129 int min, max; 130 } dot, vco, n, m, m1, m2, p, p1; 131 132 struct { 133 int dot_limit; 134 int p2_slow, p2_fast; 135 } p2; 136 }; 137 138 /* returns HPLL frequency in kHz */ 139 int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) 140 { 141 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 142 143 /* Obtain SKU information */ 144 mutex_lock(&dev_priv->sb_lock); 145 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 146 CCK_FUSE_HPLL_FREQ_MASK; 147 mutex_unlock(&dev_priv->sb_lock); 148 149 return vco_freq[hpll_freq] * 1000; 150 } 151 152 int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 153 const char *name, u32 reg, int ref_freq) 154 { 155 u32 val; 156 int divider; 157 158 mutex_lock(&dev_priv->sb_lock); 159 val = vlv_cck_read(dev_priv, reg); 160 mutex_unlock(&dev_priv->sb_lock); 161 162 divider = val & CCK_FREQUENCY_VALUES; 163 164 WARN((val & CCK_FREQUENCY_STATUS) != 165 (divider << CCK_FREQUENCY_STATUS_SHIFT), 166 "%s change in progress\n", name); 167 168 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1); 169 } 170 171 int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 172 const char *name, u32 reg) 173 { 174 if (dev_priv->hpll_freq == 0) 175 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); 176 177 return vlv_get_cck_clock(dev_priv, name, reg, 178 dev_priv->hpll_freq); 179 } 180 181 static void intel_update_czclk(struct drm_i915_private *dev_priv) 182 { 183 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 184 return; 185 186 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 187 CCK_CZ_CLOCK_CONTROL); 188 189 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 190 } 191 192 static inline u32 /* units of 100MHz */ 193 intel_fdi_link_freq(struct drm_i915_private *dev_priv, 194 const struct intel_crtc_state *pipe_config) 195 { 196 if (HAS_DDI(dev_priv)) 197 return pipe_config->port_clock; /* SPLL */ 198 else if (IS_GEN5(dev_priv)) 199 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000; 200 else 201 return 270000; 202 } 203 204 static const struct intel_limit intel_limits_i8xx_dac = { 205 .dot = { .min = 25000, .max = 350000 }, 206 .vco = { .min = 908000, .max = 1512000 }, 207 .n = { .min = 2, .max = 16 }, 208 .m = { .min = 96, .max = 140 }, 209 .m1 = { .min = 18, .max = 26 }, 210 .m2 = { .min = 
6, .max = 16 }, 211 .p = { .min = 4, .max = 128 }, 212 .p1 = { .min = 2, .max = 33 }, 213 .p2 = { .dot_limit = 165000, 214 .p2_slow = 4, .p2_fast = 2 }, 215 }; 216 217 static const struct intel_limit intel_limits_i8xx_dvo = { 218 .dot = { .min = 25000, .max = 350000 }, 219 .vco = { .min = 908000, .max = 1512000 }, 220 .n = { .min = 2, .max = 16 }, 221 .m = { .min = 96, .max = 140 }, 222 .m1 = { .min = 18, .max = 26 }, 223 .m2 = { .min = 6, .max = 16 }, 224 .p = { .min = 4, .max = 128 }, 225 .p1 = { .min = 2, .max = 33 }, 226 .p2 = { .dot_limit = 165000, 227 .p2_slow = 4, .p2_fast = 4 }, 228 }; 229 230 static const struct intel_limit intel_limits_i8xx_lvds = { 231 .dot = { .min = 25000, .max = 350000 }, 232 .vco = { .min = 908000, .max = 1512000 }, 233 .n = { .min = 2, .max = 16 }, 234 .m = { .min = 96, .max = 140 }, 235 .m1 = { .min = 18, .max = 26 }, 236 .m2 = { .min = 6, .max = 16 }, 237 .p = { .min = 4, .max = 128 }, 238 .p1 = { .min = 1, .max = 6 }, 239 .p2 = { .dot_limit = 165000, 240 .p2_slow = 14, .p2_fast = 7 }, 241 }; 242 243 static const struct intel_limit intel_limits_i9xx_sdvo = { 244 .dot = { .min = 20000, .max = 400000 }, 245 .vco = { .min = 1400000, .max = 2800000 }, 246 .n = { .min = 1, .max = 6 }, 247 .m = { .min = 70, .max = 120 }, 248 .m1 = { .min = 8, .max = 18 }, 249 .m2 = { .min = 3, .max = 7 }, 250 .p = { .min = 5, .max = 80 }, 251 .p1 = { .min = 1, .max = 8 }, 252 .p2 = { .dot_limit = 200000, 253 .p2_slow = 10, .p2_fast = 5 }, 254 }; 255 256 static const struct intel_limit intel_limits_i9xx_lvds = { 257 .dot = { .min = 20000, .max = 400000 }, 258 .vco = { .min = 1400000, .max = 2800000 }, 259 .n = { .min = 1, .max = 6 }, 260 .m = { .min = 70, .max = 120 }, 261 .m1 = { .min = 8, .max = 18 }, 262 .m2 = { .min = 3, .max = 7 }, 263 .p = { .min = 7, .max = 98 }, 264 .p1 = { .min = 1, .max = 8 }, 265 .p2 = { .dot_limit = 112000, 266 .p2_slow = 14, .p2_fast = 7 }, 267 }; 268 269 270 static const struct intel_limit intel_limits_g4x_sdvo = { 271 .dot = { .min = 25000, .max = 270000 }, 272 .vco = { .min = 1750000, .max = 3500000}, 273 .n = { .min = 1, .max = 4 }, 274 .m = { .min = 104, .max = 138 }, 275 .m1 = { .min = 17, .max = 23 }, 276 .m2 = { .min = 5, .max = 11 }, 277 .p = { .min = 10, .max = 30 }, 278 .p1 = { .min = 1, .max = 3}, 279 .p2 = { .dot_limit = 270000, 280 .p2_slow = 10, 281 .p2_fast = 10 282 }, 283 }; 284 285 static const struct intel_limit intel_limits_g4x_hdmi = { 286 .dot = { .min = 22000, .max = 400000 }, 287 .vco = { .min = 1750000, .max = 3500000}, 288 .n = { .min = 1, .max = 4 }, 289 .m = { .min = 104, .max = 138 }, 290 .m1 = { .min = 16, .max = 23 }, 291 .m2 = { .min = 5, .max = 11 }, 292 .p = { .min = 5, .max = 80 }, 293 .p1 = { .min = 1, .max = 8}, 294 .p2 = { .dot_limit = 165000, 295 .p2_slow = 10, .p2_fast = 5 }, 296 }; 297 298 static const struct intel_limit intel_limits_g4x_single_channel_lvds = { 299 .dot = { .min = 20000, .max = 115000 }, 300 .vco = { .min = 1750000, .max = 3500000 }, 301 .n = { .min = 1, .max = 3 }, 302 .m = { .min = 104, .max = 138 }, 303 .m1 = { .min = 17, .max = 23 }, 304 .m2 = { .min = 5, .max = 11 }, 305 .p = { .min = 28, .max = 112 }, 306 .p1 = { .min = 2, .max = 8 }, 307 .p2 = { .dot_limit = 0, 308 .p2_slow = 14, .p2_fast = 14 309 }, 310 }; 311 312 static const struct intel_limit intel_limits_g4x_dual_channel_lvds = { 313 .dot = { .min = 80000, .max = 224000 }, 314 .vco = { .min = 1750000, .max = 3500000 }, 315 .n = { .min = 1, .max = 3 }, 316 .m = { .min = 104, .max = 138 }, 317 .m1 = { .min = 17, .max = 23 }, 318 .m2 
= { .min = 5, .max = 11 }, 319 .p = { .min = 14, .max = 42 }, 320 .p1 = { .min = 2, .max = 6 }, 321 .p2 = { .dot_limit = 0, 322 .p2_slow = 7, .p2_fast = 7 323 }, 324 }; 325 326 static const struct intel_limit intel_limits_pineview_sdvo = { 327 .dot = { .min = 20000, .max = 400000}, 328 .vco = { .min = 1700000, .max = 3500000 }, 329 /* Pineview's Ncounter is a ring counter */ 330 .n = { .min = 3, .max = 6 }, 331 .m = { .min = 2, .max = 256 }, 332 /* Pineview only has one combined m divider, which we treat as m2. */ 333 .m1 = { .min = 0, .max = 0 }, 334 .m2 = { .min = 0, .max = 254 }, 335 .p = { .min = 5, .max = 80 }, 336 .p1 = { .min = 1, .max = 8 }, 337 .p2 = { .dot_limit = 200000, 338 .p2_slow = 10, .p2_fast = 5 }, 339 }; 340 341 static const struct intel_limit intel_limits_pineview_lvds = { 342 .dot = { .min = 20000, .max = 400000 }, 343 .vco = { .min = 1700000, .max = 3500000 }, 344 .n = { .min = 3, .max = 6 }, 345 .m = { .min = 2, .max = 256 }, 346 .m1 = { .min = 0, .max = 0 }, 347 .m2 = { .min = 0, .max = 254 }, 348 .p = { .min = 7, .max = 112 }, 349 .p1 = { .min = 1, .max = 8 }, 350 .p2 = { .dot_limit = 112000, 351 .p2_slow = 14, .p2_fast = 14 }, 352 }; 353 354 /* Ironlake / Sandybridge 355 * 356 * We calculate clock using (register_value + 2) for N/M1/M2, so here 357 * the range value for them is (actual_value - 2). 358 */ 359 static const struct intel_limit intel_limits_ironlake_dac = { 360 .dot = { .min = 25000, .max = 350000 }, 361 .vco = { .min = 1760000, .max = 3510000 }, 362 .n = { .min = 1, .max = 5 }, 363 .m = { .min = 79, .max = 127 }, 364 .m1 = { .min = 12, .max = 22 }, 365 .m2 = { .min = 5, .max = 9 }, 366 .p = { .min = 5, .max = 80 }, 367 .p1 = { .min = 1, .max = 8 }, 368 .p2 = { .dot_limit = 225000, 369 .p2_slow = 10, .p2_fast = 5 }, 370 }; 371 372 static const struct intel_limit intel_limits_ironlake_single_lvds = { 373 .dot = { .min = 25000, .max = 350000 }, 374 .vco = { .min = 1760000, .max = 3510000 }, 375 .n = { .min = 1, .max = 3 }, 376 .m = { .min = 79, .max = 118 }, 377 .m1 = { .min = 12, .max = 22 }, 378 .m2 = { .min = 5, .max = 9 }, 379 .p = { .min = 28, .max = 112 }, 380 .p1 = { .min = 2, .max = 8 }, 381 .p2 = { .dot_limit = 225000, 382 .p2_slow = 14, .p2_fast = 14 }, 383 }; 384 385 static const struct intel_limit intel_limits_ironlake_dual_lvds = { 386 .dot = { .min = 25000, .max = 350000 }, 387 .vco = { .min = 1760000, .max = 3510000 }, 388 .n = { .min = 1, .max = 3 }, 389 .m = { .min = 79, .max = 127 }, 390 .m1 = { .min = 12, .max = 22 }, 391 .m2 = { .min = 5, .max = 9 }, 392 .p = { .min = 14, .max = 56 }, 393 .p1 = { .min = 2, .max = 8 }, 394 .p2 = { .dot_limit = 225000, 395 .p2_slow = 7, .p2_fast = 7 }, 396 }; 397 398 /* LVDS 100mhz refclk limits. 
*/ 399 static const struct intel_limit intel_limits_ironlake_single_lvds_100m = { 400 .dot = { .min = 25000, .max = 350000 }, 401 .vco = { .min = 1760000, .max = 3510000 }, 402 .n = { .min = 1, .max = 2 }, 403 .m = { .min = 79, .max = 126 }, 404 .m1 = { .min = 12, .max = 22 }, 405 .m2 = { .min = 5, .max = 9 }, 406 .p = { .min = 28, .max = 112 }, 407 .p1 = { .min = 2, .max = 8 }, 408 .p2 = { .dot_limit = 225000, 409 .p2_slow = 14, .p2_fast = 14 }, 410 }; 411 412 static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = { 413 .dot = { .min = 25000, .max = 350000 }, 414 .vco = { .min = 1760000, .max = 3510000 }, 415 .n = { .min = 1, .max = 3 }, 416 .m = { .min = 79, .max = 126 }, 417 .m1 = { .min = 12, .max = 22 }, 418 .m2 = { .min = 5, .max = 9 }, 419 .p = { .min = 14, .max = 42 }, 420 .p1 = { .min = 2, .max = 6 }, 421 .p2 = { .dot_limit = 225000, 422 .p2_slow = 7, .p2_fast = 7 }, 423 }; 424 425 static const struct intel_limit intel_limits_vlv = { 426 /* 427 * These are the data rate limits (measured in fast clocks) 428 * since those are the strictest limits we have. The fast 429 * clock and actual rate limits are more relaxed, so checking 430 * them would make no difference. 431 */ 432 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 433 .vco = { .min = 4000000, .max = 6000000 }, 434 .n = { .min = 1, .max = 7 }, 435 .m1 = { .min = 2, .max = 3 }, 436 .m2 = { .min = 11, .max = 156 }, 437 .p1 = { .min = 2, .max = 3 }, 438 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 439 }; 440 441 static const struct intel_limit intel_limits_chv = { 442 /* 443 * These are the data rate limits (measured in fast clocks) 444 * since those are the strictest limits we have. The fast 445 * clock and actual rate limits are more relaxed, so checking 446 * them would make no difference. 447 */ 448 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 449 .vco = { .min = 4800000, .max = 6480000 }, 450 .n = { .min = 1, .max = 1 }, 451 .m1 = { .min = 2, .max = 2 }, 452 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 453 .p1 = { .min = 2, .max = 4 }, 454 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 455 }; 456 457 static const struct intel_limit intel_limits_bxt = { 458 /* FIXME: find real dot limits */ 459 .dot = { .min = 0, .max = INT_MAX }, 460 .vco = { .min = 4800000, .max = 6700000 }, 461 .n = { .min = 1, .max = 1 }, 462 .m1 = { .min = 2, .max = 2 }, 463 /* FIXME: find real m2 limits */ 464 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 465 .p1 = { .min = 2, .max = 4 }, 466 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 467 }; 468 469 static bool 470 needs_modeset(struct drm_crtc_state *state) 471 { 472 return drm_atomic_crtc_needs_modeset(state); 473 } 474 475 /* 476 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 477 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 478 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 479 * The helpers' return value is the rate of the clock that is fed to the 480 * display engine's pipe which can be the above fast dot clock rate or a 481 * divided-down version of it. 
482 */ 483 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 484 static int pnv_calc_dpll_params(int refclk, struct dpll *clock) 485 { 486 clock->m = clock->m2 + 2; 487 clock->p = clock->p1 * clock->p2; 488 if (WARN_ON(clock->n == 0 || clock->p == 0)) 489 return 0; 490 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 491 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 492 493 return clock->dot; 494 } 495 496 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 497 { 498 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 499 } 500 501 static int i9xx_calc_dpll_params(int refclk, struct dpll *clock) 502 { 503 clock->m = i9xx_dpll_compute_m(clock); 504 clock->p = clock->p1 * clock->p2; 505 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 506 return 0; 507 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 508 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 509 510 return clock->dot; 511 } 512 513 static int vlv_calc_dpll_params(int refclk, struct dpll *clock) 514 { 515 clock->m = clock->m1 * clock->m2; 516 clock->p = clock->p1 * clock->p2; 517 if (WARN_ON(clock->n == 0 || clock->p == 0)) 518 return 0; 519 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 520 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 521 522 return clock->dot / 5; 523 } 524 525 int chv_calc_dpll_params(int refclk, struct dpll *clock) 526 { 527 clock->m = clock->m1 * clock->m2; 528 clock->p = clock->p1 * clock->p2; 529 if (WARN_ON(clock->n == 0 || clock->p == 0)) 530 return 0; 531 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 532 clock->n << 22); 533 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 534 535 return clock->dot / 5; 536 } 537 538 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 539 /** 540 * Returns whether the given set of divisors are valid for a given refclk with 541 * the given connectors. 542 */ 543 544 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 545 const struct intel_limit *limit, 546 const struct dpll *clock) 547 { 548 if (clock->n < limit->n.min || limit->n.max < clock->n) 549 INTELPllInvalid("n out of range\n"); 550 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 551 INTELPllInvalid("p1 out of range\n"); 552 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 553 INTELPllInvalid("m2 out of range\n"); 554 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 555 INTELPllInvalid("m1 out of range\n"); 556 557 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) && 558 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv)) 559 if (clock->m1 <= clock->m2) 560 INTELPllInvalid("m1 <= m2\n"); 561 562 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 563 !IS_GEN9_LP(dev_priv)) { 564 if (clock->p < limit->p.min || limit->p.max < clock->p) 565 INTELPllInvalid("p out of range\n"); 566 if (clock->m < limit->m.min || limit->m.max < clock->m) 567 INTELPllInvalid("m out of range\n"); 568 } 569 570 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 571 INTELPllInvalid("vco out of range\n"); 572 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 573 * connector, etc., rather than just a single range. 
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
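 *
 * As a purely illustrative example (the divider values are made up, not
 * taken from any platform's tables): with a 96000 kHz reference clock and
 * m1 = 14, m2 = 6, n = 2, p1 = 2, p2 = 5 the equation above gives
 *   m   = 5 * (14 + 2) + (6 + 2) = 88
 *   vco = 96000 * 88 / (2 + 2)   = 2112000 kHz
 *   dot = 2112000 / (2 * 5)      = 211200 kHz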
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
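 *
 * Note that a candidate has to be more than 10 ppm better than the current
 * best to replace it (see the final comparison below), presumably to avoid
 * churning between configurations whose errors are practically identical.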
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(to_i915(dev))) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(to_i915(dev),
								limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	int found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is always
	 * set to 2. If we ever need to support a 200 MHz refclk we will have
	 * to revisit this because n may no longer be 1.
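	 *
	 * Note that m2 here is a fixed point number with 22 fractional bits
	 * (see the m2 limits in intel_limits_chv/intel_limits_bxt and the
	 * "n << 22" divide in chv_calc_dpll_params()), hence the 64 bit
	 * arithmetic and the INT_MAX overflow check below.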
904 */ 905 clock.n = 1, clock.m1 = 2; 906 target *= 5; /* fast clock */ 907 908 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 909 for (clock.p2 = limit->p2.p2_fast; 910 clock.p2 >= limit->p2.p2_slow; 911 clock.p2 -= clock.p2 > 10 ? 2 : 1) { 912 unsigned int error_ppm; 913 914 clock.p = clock.p1 * clock.p2; 915 916 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 917 clock.n) << 22, refclk * clock.m1); 918 919 if (m2 > INT_MAX/clock.m1) 920 continue; 921 922 clock.m2 = m2; 923 924 chv_calc_dpll_params(refclk, &clock); 925 926 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock)) 927 continue; 928 929 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 930 best_error_ppm, &error_ppm)) 931 continue; 932 933 *best_clock = clock; 934 best_error_ppm = error_ppm; 935 found = true; 936 } 937 } 938 939 return found; 940 } 941 942 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 943 struct dpll *best_clock) 944 { 945 int refclk = 100000; 946 const struct intel_limit *limit = &intel_limits_bxt; 947 948 return chv_find_best_dpll(limit, crtc_state, 949 target_clock, refclk, NULL, best_clock); 950 } 951 952 bool intel_crtc_active(struct intel_crtc *crtc) 953 { 954 /* Be paranoid as we can arrive here with only partial 955 * state retrieved from the hardware during setup. 956 * 957 * We can ditch the adjusted_mode.crtc_clock check as soon 958 * as Haswell has gained clock readout/fastboot support. 959 * 960 * We can ditch the crtc->primary->fb check as soon as we can 961 * properly reconstruct framebuffers. 962 * 963 * FIXME: The intel_crtc->active here should be switched to 964 * crtc->state->active once we have proper CRTC states wired up 965 * for atomic. 966 */ 967 return crtc->active && crtc->base.primary->state->fb && 968 crtc->config->base.adjusted_mode.crtc_clock; 969 } 970 971 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 972 enum i915_pipe pipe) 973 { 974 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 975 976 return crtc->config->cpu_transcoder; 977 } 978 979 static bool pipe_dsl_stopped(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 980 { 981 i915_reg_t reg = PIPEDSL(pipe); 982 u32 line1, line2; 983 u32 line_mask; 984 985 if (IS_GEN2(dev_priv)) 986 line_mask = DSL_LINEMASK_GEN2; 987 else 988 line_mask = DSL_LINEMASK_GEN3; 989 990 line1 = I915_READ(reg) & line_mask; 991 msleep(5); 992 line2 = I915_READ(reg) & line_mask; 993 994 return line1 == line2; 995 } 996 997 /* 998 * intel_wait_for_pipe_off - wait for pipe to turn off 999 * @crtc: crtc whose pipe to wait for 1000 * 1001 * After disabling a pipe, we can't wait for vblank in the usual way, 1002 * spinning on the vblank interrupt status bit, since we won't actually 1003 * see an interrupt when the pipe is disabled. 1004 * 1005 * On Gen4 and above: 1006 * wait for the pipe register state bit to turn off 1007 * 1008 * Otherwise: 1009 * wait for the display line value to settle (it usually 1010 * ends up stopping at the start of the next frame). 
1011 * 1012 */ 1013 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1014 { 1015 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1016 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1017 enum i915_pipe pipe = crtc->pipe; 1018 1019 if (INTEL_GEN(dev_priv) >= 4) { 1020 i915_reg_t reg = PIPECONF(cpu_transcoder); 1021 1022 /* Wait for the Pipe State to go off */ 1023 if (intel_wait_for_register(dev_priv, 1024 reg, I965_PIPECONF_ACTIVE, 0, 1025 100)) 1026 WARN(1, "pipe_off wait timed out\n"); 1027 } else { 1028 /* Wait for the display line to settle */ 1029 if (wait_for(pipe_dsl_stopped(dev_priv, pipe), 100)) 1030 WARN(1, "pipe_off wait timed out\n"); 1031 } 1032 } 1033 1034 /* Only for pre-ILK configs */ 1035 void assert_pll(struct drm_i915_private *dev_priv, 1036 enum i915_pipe pipe, bool state) 1037 { 1038 u32 val; 1039 bool cur_state; 1040 1041 val = I915_READ(DPLL(pipe)); 1042 cur_state = !!(val & DPLL_VCO_ENABLE); 1043 I915_STATE_WARN(cur_state != state, 1044 "PLL state assertion failure (expected %s, current %s)\n", 1045 onoff(state), onoff(cur_state)); 1046 } 1047 1048 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1049 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1050 { 1051 u32 val; 1052 bool cur_state; 1053 1054 mutex_lock(&dev_priv->sb_lock); 1055 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1056 mutex_unlock(&dev_priv->sb_lock); 1057 1058 cur_state = val & DSI_PLL_VCO_EN; 1059 I915_STATE_WARN(cur_state != state, 1060 "DSI PLL state assertion failure (expected %s, current %s)\n", 1061 onoff(state), onoff(cur_state)); 1062 } 1063 1064 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1065 enum i915_pipe pipe, bool state) 1066 { 1067 bool cur_state; 1068 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1069 pipe); 1070 1071 if (HAS_DDI(dev_priv)) { 1072 /* DDI does not have a specific FDI_TX register */ 1073 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1074 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1075 } else { 1076 u32 val = I915_READ(FDI_TX_CTL(pipe)); 1077 cur_state = !!(val & FDI_TX_ENABLE); 1078 } 1079 I915_STATE_WARN(cur_state != state, 1080 "FDI TX state assertion failure (expected %s, current %s)\n", 1081 onoff(state), onoff(cur_state)); 1082 } 1083 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1084 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1085 1086 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1087 enum i915_pipe pipe, bool state) 1088 { 1089 u32 val; 1090 bool cur_state; 1091 1092 val = I915_READ(FDI_RX_CTL(pipe)); 1093 cur_state = !!(val & FDI_RX_ENABLE); 1094 I915_STATE_WARN(cur_state != state, 1095 "FDI RX state assertion failure (expected %s, current %s)\n", 1096 onoff(state), onoff(cur_state)); 1097 } 1098 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1099 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1100 1101 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1102 enum i915_pipe pipe) 1103 { 1104 u32 val; 1105 1106 /* ILK FDI PLL is always enabled */ 1107 if (IS_GEN5(dev_priv)) 1108 return; 1109 1110 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1111 if (HAS_DDI(dev_priv)) 1112 return; 1113 1114 val = I915_READ(FDI_TX_CTL(pipe)); 1115 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1116 } 1117 1118 void assert_fdi_rx_pll(struct drm_i915_private 
*dev_priv, 1119 enum i915_pipe pipe, bool state) 1120 { 1121 u32 val; 1122 bool cur_state; 1123 1124 val = I915_READ(FDI_RX_CTL(pipe)); 1125 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1126 I915_STATE_WARN(cur_state != state, 1127 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1128 onoff(state), onoff(cur_state)); 1129 } 1130 1131 void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1132 { 1133 i915_reg_t pp_reg; 1134 u32 val; 1135 enum i915_pipe panel_pipe = PIPE_A; 1136 bool locked = true; 1137 1138 if (WARN_ON(HAS_DDI(dev_priv))) 1139 return; 1140 1141 if (HAS_PCH_SPLIT(dev_priv)) { 1142 u32 port_sel; 1143 1144 pp_reg = PP_CONTROL(0); 1145 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK; 1146 1147 if (port_sel == PANEL_PORT_SELECT_LVDS && 1148 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1149 panel_pipe = PIPE_B; 1150 /* XXX: else fix for eDP */ 1151 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1152 /* presumably write lock depends on pipe, not port select */ 1153 pp_reg = PP_CONTROL(pipe); 1154 panel_pipe = pipe; 1155 } else { 1156 pp_reg = PP_CONTROL(0); 1157 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1158 panel_pipe = PIPE_B; 1159 } 1160 1161 val = I915_READ(pp_reg); 1162 if (!(val & PANEL_POWER_ON) || 1163 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1164 locked = false; 1165 1166 I915_STATE_WARN(panel_pipe == pipe && locked, 1167 "panel assertion failure, pipe %c regs locked\n", 1168 pipe_name(pipe)); 1169 } 1170 1171 static void assert_cursor(struct drm_i915_private *dev_priv, 1172 enum i915_pipe pipe, bool state) 1173 { 1174 bool cur_state; 1175 1176 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 1177 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 1178 else 1179 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1180 1181 I915_STATE_WARN(cur_state != state, 1182 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1183 pipe_name(pipe), onoff(state), onoff(cur_state)); 1184 } 1185 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1186 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1187 1188 void assert_pipe(struct drm_i915_private *dev_priv, 1189 enum i915_pipe pipe, bool state) 1190 { 1191 bool cur_state; 1192 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1193 pipe); 1194 enum intel_display_power_domain power_domain; 1195 1196 /* if we need the pipe quirk it must be always on */ 1197 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1198 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1199 state = true; 1200 1201 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 1202 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 1203 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1204 cur_state = !!(val & PIPECONF_ENABLE); 1205 1206 intel_display_power_put(dev_priv, power_domain); 1207 } else { 1208 cur_state = false; 1209 } 1210 1211 I915_STATE_WARN(cur_state != state, 1212 "pipe %c assertion failure (expected %s, current %s)\n", 1213 pipe_name(pipe), onoff(state), onoff(cur_state)); 1214 } 1215 1216 static void assert_plane(struct drm_i915_private *dev_priv, 1217 enum plane plane, bool state) 1218 { 1219 u32 val; 1220 bool cur_state; 1221 1222 val = I915_READ(DSPCNTR(plane)); 1223 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1224 I915_STATE_WARN(cur_state != state, 1225 "plane %c assertion failure (expected %s, current %s)\n", 1226 plane_name(plane), onoff(state), onoff(cur_state)); 1227 } 1228 
1229 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1230 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1231 1232 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1233 enum i915_pipe pipe) 1234 { 1235 int i; 1236 1237 /* Primary planes are fixed to pipes on gen4+ */ 1238 if (INTEL_GEN(dev_priv) >= 4) { 1239 u32 val = I915_READ(DSPCNTR(pipe)); 1240 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1241 "plane %c assertion failure, should be disabled but not\n", 1242 plane_name(pipe)); 1243 return; 1244 } 1245 1246 /* Need to check both planes against the pipe */ 1247 for_each_pipe(dev_priv, i) { 1248 u32 val = I915_READ(DSPCNTR(i)); 1249 enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1250 DISPPLANE_SEL_PIPE_SHIFT; 1251 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1252 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1253 plane_name(i), pipe_name(pipe)); 1254 } 1255 } 1256 1257 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1258 enum i915_pipe pipe) 1259 { 1260 int sprite; 1261 1262 if (INTEL_GEN(dev_priv) >= 9) { 1263 for_each_sprite(dev_priv, pipe, sprite) { 1264 u32 val = I915_READ(PLANE_CTL(pipe, sprite)); 1265 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1266 "plane %d assertion failure, should be off on pipe %c but is still active\n", 1267 sprite, pipe_name(pipe)); 1268 } 1269 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 1270 for_each_sprite(dev_priv, pipe, sprite) { 1271 u32 val = I915_READ(SPCNTR(pipe, PLANE_SPRITE0 + sprite)); 1272 I915_STATE_WARN(val & SP_ENABLE, 1273 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1274 sprite_name(pipe, sprite), pipe_name(pipe)); 1275 } 1276 } else if (INTEL_GEN(dev_priv) >= 7) { 1277 u32 val = I915_READ(SPRCTL(pipe)); 1278 I915_STATE_WARN(val & SPRITE_ENABLE, 1279 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1280 plane_name(pipe), pipe_name(pipe)); 1281 } else if (INTEL_GEN(dev_priv) >= 5) { 1282 u32 val = I915_READ(DVSCNTR(pipe)); 1283 I915_STATE_WARN(val & DVS_ENABLE, 1284 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1285 plane_name(pipe), pipe_name(pipe)); 1286 } 1287 } 1288 1289 static void assert_vblank_disabled(struct drm_crtc *crtc) 1290 { 1291 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1292 drm_crtc_vblank_put(crtc); 1293 } 1294 1295 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1296 enum i915_pipe pipe) 1297 { 1298 u32 val; 1299 bool enabled; 1300 1301 val = I915_READ(PCH_TRANSCONF(pipe)); 1302 enabled = !!(val & TRANS_ENABLE); 1303 I915_STATE_WARN(enabled, 1304 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1305 pipe_name(pipe)); 1306 } 1307 1308 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1309 enum i915_pipe pipe, u32 port_sel, u32 val) 1310 { 1311 if ((val & DP_PORT_EN) == 0) 1312 return false; 1313 1314 if (HAS_PCH_CPT(dev_priv)) { 1315 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); 1316 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1317 return false; 1318 } else if (IS_CHERRYVIEW(dev_priv)) { 1319 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1320 return false; 1321 } else { 1322 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1323 return false; 1324 } 1325 return true; 1326 } 1327 1328 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1329 enum i915_pipe pipe, u32 val) 
1330 { 1331 if ((val & SDVO_ENABLE) == 0) 1332 return false; 1333 1334 if (HAS_PCH_CPT(dev_priv)) { 1335 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1336 return false; 1337 } else if (IS_CHERRYVIEW(dev_priv)) { 1338 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1339 return false; 1340 } else { 1341 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1342 return false; 1343 } 1344 return true; 1345 } 1346 1347 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1348 enum i915_pipe pipe, u32 val) 1349 { 1350 if ((val & LVDS_PORT_EN) == 0) 1351 return false; 1352 1353 if (HAS_PCH_CPT(dev_priv)) { 1354 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1355 return false; 1356 } else { 1357 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1358 return false; 1359 } 1360 return true; 1361 } 1362 1363 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1364 enum i915_pipe pipe, u32 val) 1365 { 1366 if ((val & ADPA_DAC_ENABLE) == 0) 1367 return false; 1368 if (HAS_PCH_CPT(dev_priv)) { 1369 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1370 return false; 1371 } else { 1372 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1373 return false; 1374 } 1375 return true; 1376 } 1377 1378 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1379 enum i915_pipe pipe, i915_reg_t reg, 1380 u32 port_sel) 1381 { 1382 u32 val = I915_READ(reg); 1383 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1384 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1385 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1386 1387 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 1388 && (val & DP_PIPEB_SELECT), 1389 "IBX PCH dp port still using transcoder B\n"); 1390 } 1391 1392 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1393 enum i915_pipe pipe, i915_reg_t reg) 1394 { 1395 u32 val = I915_READ(reg); 1396 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1397 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1398 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1399 1400 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 1401 && (val & SDVO_PIPE_B_SELECT), 1402 "IBX PCH hdmi port still using transcoder B\n"); 1403 } 1404 1405 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1406 enum i915_pipe pipe) 1407 { 1408 u32 val; 1409 1410 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1411 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1412 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1413 1414 val = I915_READ(PCH_ADPA); 1415 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1416 "PCH VGA enabled on transcoder %c, should be disabled\n", 1417 pipe_name(pipe)); 1418 1419 val = I915_READ(PCH_LVDS); 1420 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1421 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1422 pipe_name(pipe)); 1423 1424 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1425 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1426 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1427 } 1428 1429 static void _vlv_enable_pll(struct intel_crtc *crtc, 1430 const struct intel_crtc_state *pipe_config) 1431 { 1432 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1433 enum i915_pipe pipe = crtc->pipe; 1434 1435 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1436 
POSTING_READ(DPLL(pipe)); 1437 udelay(150); 1438 1439 if (intel_wait_for_register(dev_priv, 1440 DPLL(pipe), 1441 DPLL_LOCK_VLV, 1442 DPLL_LOCK_VLV, 1443 1)) 1444 DRM_ERROR("DPLL %d failed to lock\n", pipe); 1445 } 1446 1447 static void vlv_enable_pll(struct intel_crtc *crtc, 1448 const struct intel_crtc_state *pipe_config) 1449 { 1450 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1451 enum i915_pipe pipe = crtc->pipe; 1452 1453 assert_pipe_disabled(dev_priv, pipe); 1454 1455 /* PLL is protected by panel, make sure we can write it */ 1456 assert_panel_unlocked(dev_priv, pipe); 1457 1458 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1459 _vlv_enable_pll(crtc, pipe_config); 1460 1461 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1462 POSTING_READ(DPLL_MD(pipe)); 1463 } 1464 1465 1466 static void _chv_enable_pll(struct intel_crtc *crtc, 1467 const struct intel_crtc_state *pipe_config) 1468 { 1469 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1470 enum i915_pipe pipe = crtc->pipe; 1471 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1472 u32 tmp; 1473 1474 mutex_lock(&dev_priv->sb_lock); 1475 1476 /* Enable back the 10bit clock to display controller */ 1477 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1478 tmp |= DPIO_DCLKP_EN; 1479 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1480 1481 mutex_unlock(&dev_priv->sb_lock); 1482 1483 /* 1484 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 1485 */ 1486 udelay(1); 1487 1488 /* Enable PLL */ 1489 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1490 1491 /* Check PLL is locked */ 1492 if (intel_wait_for_register(dev_priv, 1493 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, 1494 1)) 1495 DRM_ERROR("PLL %d failed to lock\n", pipe); 1496 } 1497 1498 static void chv_enable_pll(struct intel_crtc *crtc, 1499 const struct intel_crtc_state *pipe_config) 1500 { 1501 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1502 enum i915_pipe pipe = crtc->pipe; 1503 1504 assert_pipe_disabled(dev_priv, pipe); 1505 1506 /* PLL is protected by panel, make sure we can write it */ 1507 assert_panel_unlocked(dev_priv, pipe); 1508 1509 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1510 _chv_enable_pll(crtc, pipe_config); 1511 1512 if (pipe != PIPE_A) { 1513 /* 1514 * WaPixelRepeatModeFixForC0:chv 1515 * 1516 * DPLLCMD is AWOL. Use chicken bits to propagate 1517 * the value from DPLLBMD to either pipe B or C. 1518 */ 1519 I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C); 1520 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md); 1521 I915_WRITE(CBR4_VLV, 0); 1522 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; 1523 1524 /* 1525 * DPLLB VGA mode also seems to cause problems. 1526 * We should always have it disabled. 
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		count += crtc->base.state->active &&
			intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
	}

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv) && !IS_I830(dev_priv))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev_priv) && intel_num_dvo_pipes(dev_priv) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_GEN(dev_priv) >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
1618 */ 1619 static void i9xx_disable_pll(struct intel_crtc *crtc) 1620 { 1621 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1622 enum i915_pipe pipe = crtc->pipe; 1623 1624 /* Disable DVO 2x clock on both PLLs if necessary */ 1625 if (IS_I830(dev_priv) && 1626 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && 1627 !intel_num_dvo_pipes(dev_priv)) { 1628 I915_WRITE(DPLL(PIPE_B), 1629 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1630 I915_WRITE(DPLL(PIPE_A), 1631 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1632 } 1633 1634 /* Don't disable pipe or pipe PLLs if needed */ 1635 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1636 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1637 return; 1638 1639 /* Make sure the pipe isn't still relying on us */ 1640 assert_pipe_disabled(dev_priv, pipe); 1641 1642 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 1643 POSTING_READ(DPLL(pipe)); 1644 } 1645 1646 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1647 { 1648 u32 val; 1649 1650 /* Make sure the pipe isn't still relying on us */ 1651 assert_pipe_disabled(dev_priv, pipe); 1652 1653 val = DPLL_INTEGRATED_REF_CLK_VLV | 1654 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1655 if (pipe != PIPE_A) 1656 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1657 1658 I915_WRITE(DPLL(pipe), val); 1659 POSTING_READ(DPLL(pipe)); 1660 } 1661 1662 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1663 { 1664 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1665 u32 val; 1666 1667 /* Make sure the pipe isn't still relying on us */ 1668 assert_pipe_disabled(dev_priv, pipe); 1669 1670 val = DPLL_SSC_REF_CLK_CHV | 1671 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1672 if (pipe != PIPE_A) 1673 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1674 1675 I915_WRITE(DPLL(pipe), val); 1676 POSTING_READ(DPLL(pipe)); 1677 1678 mutex_lock(&dev_priv->sb_lock); 1679 1680 /* Disable 10bit clock to display controller */ 1681 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1682 val &= ~DPIO_DCLKP_EN; 1683 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1684 1685 mutex_unlock(&dev_priv->sb_lock); 1686 } 1687 1688 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1689 struct intel_digital_port *dport, 1690 unsigned int expected_mask) 1691 { 1692 u32 port_mask; 1693 i915_reg_t dpll_reg; 1694 1695 switch (dport->port) { 1696 case PORT_B: 1697 port_mask = DPLL_PORTB_READY_MASK; 1698 dpll_reg = DPLL(0); 1699 break; 1700 case PORT_C: 1701 port_mask = DPLL_PORTC_READY_MASK; 1702 dpll_reg = DPLL(0); 1703 expected_mask <<= 4; 1704 break; 1705 case PORT_D: 1706 port_mask = DPLL_PORTD_READY_MASK; 1707 dpll_reg = DPIO_PHY_STATUS; 1708 break; 1709 default: 1710 BUG(); 1711 } 1712 1713 if (intel_wait_for_register(dev_priv, 1714 dpll_reg, port_mask, expected_mask, 1715 1000)) 1716 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1717 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1718 } 1719 1720 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1721 enum i915_pipe pipe) 1722 { 1723 struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, 1724 pipe); 1725 i915_reg_t reg; 1726 uint32_t val, pipeconf_val; 1727 1728 /* Make sure PCH DPLL is enabled */ 1729 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); 1730 1731 /* FDI must be feeding us bits for PCH ports */ 1732 assert_fdi_tx_enabled(dev_priv, pipe); 1733 
assert_fdi_rx_enabled(dev_priv, pipe); 1734 1735 if (HAS_PCH_CPT(dev_priv)) { 1736 /* Workaround: Set the timing override bit before enabling the 1737 * pch transcoder. */ 1738 reg = TRANS_CHICKEN2(pipe); 1739 val = I915_READ(reg); 1740 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1741 I915_WRITE(reg, val); 1742 } 1743 1744 reg = PCH_TRANSCONF(pipe); 1745 val = I915_READ(reg); 1746 pipeconf_val = I915_READ(PIPECONF(pipe)); 1747 1748 if (HAS_PCH_IBX(dev_priv)) { 1749 /* 1750 * Make the BPC in transcoder be consistent with 1751 * that in pipeconf reg. For HDMI we must use 8bpc 1752 * here for both 8bpc and 12bpc. 1753 */ 1754 val &= ~PIPECONF_BPC_MASK; 1755 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1756 val |= PIPECONF_8BPC; 1757 else 1758 val |= pipeconf_val & PIPECONF_BPC_MASK; 1759 } 1760 1761 val &= ~TRANS_INTERLACE_MASK; 1762 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1763 if (HAS_PCH_IBX(dev_priv) && 1764 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1765 val |= TRANS_LEGACY_INTERLACED_ILK; 1766 else 1767 val |= TRANS_INTERLACED; 1768 else 1769 val |= TRANS_PROGRESSIVE; 1770 1771 I915_WRITE(reg, val | TRANS_ENABLE); 1772 if (intel_wait_for_register(dev_priv, 1773 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 1774 100)) 1775 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1776 } 1777 1778 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1779 enum transcoder cpu_transcoder) 1780 { 1781 u32 val, pipeconf_val; 1782 1783 /* FDI must be feeding us bits for PCH ports */ 1784 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1785 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1786 1787 /* Workaround: set timing override bit. */ 1788 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1789 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1790 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1791 1792 val = TRANS_ENABLE; 1793 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1794 1795 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1796 PIPECONF_INTERLACED_ILK) 1797 val |= TRANS_INTERLACED; 1798 else 1799 val |= TRANS_PROGRESSIVE; 1800 1801 I915_WRITE(LPT_TRANSCONF, val); 1802 if (intel_wait_for_register(dev_priv, 1803 LPT_TRANSCONF, 1804 TRANS_STATE_ENABLE, 1805 TRANS_STATE_ENABLE, 1806 100)) 1807 DRM_ERROR("Failed to enable PCH transcoder\n"); 1808 } 1809 1810 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1811 enum i915_pipe pipe) 1812 { 1813 i915_reg_t reg; 1814 uint32_t val; 1815 1816 /* FDI relies on the transcoder */ 1817 assert_fdi_tx_disabled(dev_priv, pipe); 1818 assert_fdi_rx_disabled(dev_priv, pipe); 1819 1820 /* Ports must be off as well */ 1821 assert_pch_ports_disabled(dev_priv, pipe); 1822 1823 reg = PCH_TRANSCONF(pipe); 1824 val = I915_READ(reg); 1825 val &= ~TRANS_ENABLE; 1826 I915_WRITE(reg, val); 1827 /* wait for PCH transcoder off, transcoder state */ 1828 if (intel_wait_for_register(dev_priv, 1829 reg, TRANS_STATE_ENABLE, 0, 1830 50)) 1831 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1832 1833 if (HAS_PCH_CPT(dev_priv)) { 1834 /* Workaround: Clear the timing override chicken bit again. 
*/ 1835 reg = TRANS_CHICKEN2(pipe); 1836 val = I915_READ(reg); 1837 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1838 I915_WRITE(reg, val); 1839 } 1840 } 1841 1842 void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1843 { 1844 u32 val; 1845 1846 val = I915_READ(LPT_TRANSCONF); 1847 val &= ~TRANS_ENABLE; 1848 I915_WRITE(LPT_TRANSCONF, val); 1849 /* wait for PCH transcoder off, transcoder state */ 1850 if (intel_wait_for_register(dev_priv, 1851 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 1852 50)) 1853 DRM_ERROR("Failed to disable PCH transcoder\n"); 1854 1855 /* Workaround: clear timing override bit. */ 1856 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1857 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1858 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1859 } 1860 1861 enum transcoder intel_crtc_pch_transcoder(struct intel_crtc *crtc) 1862 { 1863 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1864 1865 WARN_ON(!crtc->config->has_pch_encoder); 1866 1867 if (HAS_PCH_LPT(dev_priv)) 1868 return TRANSCODER_A; 1869 else 1870 return (enum transcoder) crtc->pipe; 1871 } 1872 1873 /** 1874 * intel_enable_pipe - enable a pipe, asserting requirements 1875 * @crtc: crtc responsible for the pipe 1876 * 1877 * Enable @crtc's pipe, making sure that various hardware specific requirements 1878 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1879 */ 1880 static void intel_enable_pipe(struct intel_crtc *crtc) 1881 { 1882 struct drm_device *dev = crtc->base.dev; 1883 struct drm_i915_private *dev_priv = to_i915(dev); 1884 enum i915_pipe pipe = crtc->pipe; 1885 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1886 i915_reg_t reg; 1887 u32 val; 1888 1889 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1890 1891 assert_planes_disabled(dev_priv, pipe); 1892 assert_cursor_disabled(dev_priv, pipe); 1893 assert_sprites_disabled(dev_priv, pipe); 1894 1895 /* 1896 * A pipe without a PLL won't actually be able to drive bits from 1897 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1898 * need the check. 1899 */ 1900 if (HAS_GMCH_DISPLAY(dev_priv)) { 1901 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) 1902 assert_dsi_pll_enabled(dev_priv); 1903 else 1904 assert_pll_enabled(dev_priv, pipe); 1905 } else { 1906 if (crtc->config->has_pch_encoder) { 1907 /* if driving the PCH, we need FDI enabled */ 1908 assert_fdi_rx_pll_enabled(dev_priv, 1909 (enum i915_pipe) intel_crtc_pch_transcoder(crtc)); 1910 assert_fdi_tx_pll_enabled(dev_priv, 1911 (enum i915_pipe) cpu_transcoder); 1912 } 1913 /* FIXME: assert CPU port conditions for SNB+ */ 1914 } 1915 1916 reg = PIPECONF(cpu_transcoder); 1917 val = I915_READ(reg); 1918 if (val & PIPECONF_ENABLE) { 1919 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1920 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))); 1921 return; 1922 } 1923 1924 I915_WRITE(reg, val | PIPECONF_ENABLE); 1925 POSTING_READ(reg); 1926 1927 /* 1928 * Until the pipe starts DSL will read as 0, which would cause 1929 * an apparent vblank timestamp jump, which messes up also the 1930 * frame count when it's derived from the timestamps. 
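* (While DSL reads 0, intel_get_crtc_scanline() stays at crtc->scanline_offset, which is exactly what the wait below checks for.)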
So let's 1931 * wait for the pipe to start properly before we call 1932 * drm_crtc_vblank_on() 1933 */ 1934 if (dev->max_vblank_count == 0 && 1935 wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) 1936 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); 1937 } 1938 1939 /** 1940 * intel_disable_pipe - disable a pipe, asserting requirements 1941 * @crtc: crtc whose pipe is to be disabled 1942 * 1943 * Disable the pipe of @crtc, making sure that various hardware 1944 * specific requirements are met, if applicable, e.g. plane 1945 * disabled, panel fitter off, etc. 1946 * 1947 * Will wait until the pipe has shut down before returning. 1948 */ 1949 static void intel_disable_pipe(struct intel_crtc *crtc) 1950 { 1951 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1952 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1953 enum i915_pipe pipe = crtc->pipe; 1954 i915_reg_t reg; 1955 u32 val; 1956 1957 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 1958 1959 /* 1960 * Make sure planes won't keep trying to pump pixels to us, 1961 * or we might hang the display. 1962 */ 1963 assert_planes_disabled(dev_priv, pipe); 1964 assert_cursor_disabled(dev_priv, pipe); 1965 assert_sprites_disabled(dev_priv, pipe); 1966 1967 reg = PIPECONF(cpu_transcoder); 1968 val = I915_READ(reg); 1969 if ((val & PIPECONF_ENABLE) == 0) 1970 return; 1971 1972 /* 1973 * Double wide has implications for planes 1974 * so best keep it disabled when not needed. 1975 */ 1976 if (crtc->config->double_wide) 1977 val &= ~PIPECONF_DOUBLE_WIDE; 1978 1979 /* Don't disable pipe or pipe PLLs if needed */ 1980 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 1981 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1982 val &= ~PIPECONF_ENABLE; 1983 1984 I915_WRITE(reg, val); 1985 if ((val & PIPECONF_ENABLE) == 0) 1986 intel_wait_for_pipe_off(crtc); 1987 } 1988 1989 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 1990 { 1991 return IS_GEN2(dev_priv) ?
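/* gen2 uses 2 KiB tiles, everything newer uses 4 KiB */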
2048 : 4096; 1992 } 1993 1994 static unsigned int 1995 intel_tile_width_bytes(const struct drm_framebuffer *fb, int plane) 1996 { 1997 struct drm_i915_private *dev_priv = to_i915(fb->dev); 1998 unsigned int cpp = fb->format->cpp[plane]; 1999 2000 switch (fb->modifier) { 2001 case DRM_FORMAT_MOD_LINEAR: 2002 return cpp; 2003 case I915_FORMAT_MOD_X_TILED: 2004 if (IS_GEN2(dev_priv)) 2005 return 128; 2006 else 2007 return 512; 2008 case I915_FORMAT_MOD_Y_TILED: 2009 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv)) 2010 return 128; 2011 else 2012 return 512; 2013 case I915_FORMAT_MOD_Yf_TILED: 2014 switch (cpp) { 2015 case 1: 2016 return 64; 2017 case 2: 2018 case 4: 2019 return 128; 2020 case 8: 2021 case 16: 2022 return 256; 2023 default: 2024 MISSING_CASE(cpp); 2025 return cpp; 2026 } 2027 break; 2028 default: 2029 MISSING_CASE(fb->modifier); 2030 return cpp; 2031 } 2032 } 2033 2034 static unsigned int 2035 intel_tile_height(const struct drm_framebuffer *fb, int plane) 2036 { 2037 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 2038 return 1; 2039 else 2040 return intel_tile_size(to_i915(fb->dev)) / 2041 intel_tile_width_bytes(fb, plane); 2042 } 2043 2044 /* Return the tile dimensions in pixel units */ 2045 static void intel_tile_dims(const struct drm_framebuffer *fb, int plane, 2046 unsigned int *tile_width, 2047 unsigned int *tile_height) 2048 { 2049 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, plane); 2050 unsigned int cpp = fb->format->cpp[plane]; 2051 2052 *tile_width = tile_width_bytes / cpp; 2053 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes; 2054 } 2055 2056 unsigned int 2057 intel_fb_align_height(const struct drm_framebuffer *fb, 2058 int plane, unsigned int height) 2059 { 2060 unsigned int tile_height = intel_tile_height(fb, plane); 2061 2062 return ALIGN(height, tile_height); 2063 } 2064 2065 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) 2066 { 2067 unsigned int size = 0; 2068 int i; 2069 2070 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2071 size += rot_info->plane[i].width * rot_info->plane[i].height; 2072 2073 return size; 2074 } 2075 2076 static void 2077 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2078 const struct drm_framebuffer *fb, 2079 unsigned int rotation) 2080 { 2081 view->type = I915_GGTT_VIEW_NORMAL; 2082 if (drm_rotation_90_or_270(rotation)) { 2083 view->type = I915_GGTT_VIEW_ROTATED; 2084 view->rotated = to_intel_framebuffer(fb)->rot_info; 2085 } 2086 } 2087 2088 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2089 { 2090 if (INTEL_INFO(dev_priv)->gen >= 9) 2091 return 256 * 1024; 2092 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2093 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2094 return 128 * 1024; 2095 else if (INTEL_INFO(dev_priv)->gen >= 4) 2096 return 4 * 1024; 2097 else 2098 return 0; 2099 } 2100 2101 static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb, 2102 int plane) 2103 { 2104 struct drm_i915_private *dev_priv = to_i915(fb->dev); 2105 2106 /* AUX_DIST needs only 4K alignment */ 2107 if (fb->format->format == DRM_FORMAT_NV12 && plane == 1) 2108 return 4096; 2109 2110 switch (fb->modifier) { 2111 case DRM_FORMAT_MOD_LINEAR: 2112 return intel_linear_alignment(dev_priv); 2113 case I915_FORMAT_MOD_X_TILED: 2114 if (INTEL_GEN(dev_priv) >= 9) 2115 return 256 * 1024; 2116 return 0; 2117 case I915_FORMAT_MOD_Y_TILED: 2118 case I915_FORMAT_MOD_Yf_TILED: 2119 return 1 * 1024 * 1024; 2120 default: 
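/* unknown modifier: warn and fall back to no extra alignment */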
2121 MISSING_CASE(fb->modifier); 2122 return 0; 2123 } 2124 } 2125 2126 struct i915_vma * 2127 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2128 { 2129 struct drm_device *dev = fb->dev; 2130 struct drm_i915_private *dev_priv = to_i915(dev); 2131 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2132 struct i915_ggtt_view view; 2133 struct i915_vma *vma; 2134 u32 alignment; 2135 2136 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 2137 2138 alignment = intel_surf_alignment(fb, 0); 2139 2140 intel_fill_fb_ggtt_view(&view, fb, rotation); 2141 2142 /* Note that the w/a also requires 64 PTE of padding following the 2143 * bo. We currently fill all unused PTE with the shadow page and so 2144 * we should always have valid PTE following the scanout preventing 2145 * the VT-d warning. 2146 */ 2147 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024) 2148 alignment = 256 * 1024; 2149 2150 /* 2151 * Global gtt pte registers are special registers which actually forward 2152 * writes to a chunk of system memory. Which means that there is no risk 2153 * that the register values disappear as soon as we call 2154 * intel_runtime_pm_put(), so it is correct to wrap only the 2155 * pin/unpin/fence and not more. 2156 */ 2157 intel_runtime_pm_get(dev_priv); 2158 2159 vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2160 if (IS_ERR(vma)) 2161 goto err; 2162 2163 if (i915_vma_is_map_and_fenceable(vma)) { 2164 /* Install a fence for tiled scan-out. Pre-i965 always needs a 2165 * fence, whereas 965+ only requires a fence if using 2166 * framebuffer compression. For simplicity, we always, when 2167 * possible, install a fence as the cost is not that onerous. 2168 * 2169 * If we fail to fence the tiled scanout, then either the 2170 * modeset will reject the change (which is highly unlikely as 2171 * the affected systems, all but one, do not have unmappable 2172 * space) or we will not be able to enable full powersaving 2173 * techniques (also likely not to apply due to various limits 2174 * FBC and the like impose on the size of the buffer, which 2175 * presumably we violated anyway with this unmappable buffer). 2176 * Anyway, it is presumably better to stumble onwards with 2177 * something and try to run the system in a "less than optimal" 2178 * mode that matches the user configuration. 2179 */ 2180 if (i915_vma_get_fence(vma) == 0) 2181 i915_vma_pin_fence(vma); 2182 } 2183 2184 i915_vma_get(vma); 2185 err: 2186 intel_runtime_pm_put(dev_priv); 2187 return vma; 2188 } 2189 2190 void intel_unpin_fb_vma(struct i915_vma *vma) 2191 { 2192 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2193 2194 i915_vma_unpin_fence(vma); 2195 i915_gem_object_unpin_from_display_plane(vma); 2196 i915_vma_put(vma); 2197 } 2198 2199 static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane, 2200 unsigned int rotation) 2201 { 2202 if (drm_rotation_90_or_270(rotation)) 2203 return to_intel_framebuffer(fb)->rotated[plane].pitch; 2204 else 2205 return fb->pitches[plane]; 2206 } 2207 2208 /* 2209 * Convert the x/y offsets into a linear offset. 2210 * Only valid with 0/180 degree rotation, which is fine since linear 2211 * offset is only used with linear buffers on pre-hsw and tiled buffers 2212 * with gen2/3, and 90/270 degree rotations isn't supported on any of them. 
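* The result is simply y * pitch + x * cpp, in bytes; e.g. with a 4 byte/pixel format and an 8192 byte pitch, (x=16, y=2) maps to 2*8192 + 16*4 = 16448 bytes.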
2213 */ 2214 u32 intel_fb_xy_to_linear(int x, int y, 2215 const struct intel_plane_state *state, 2216 int plane) 2217 { 2218 const struct drm_framebuffer *fb = state->base.fb; 2219 unsigned int cpp = fb->format->cpp[plane]; 2220 unsigned int pitch = fb->pitches[plane]; 2221 2222 return y * pitch + x * cpp; 2223 } 2224 2225 /* 2226 * Add the x/y offsets derived from fb->offsets[] to the user 2227 * specified plane src x/y offsets. The resulting x/y offsets 2228 * specify the start of scanout from the beginning of the gtt mapping. 2229 */ 2230 void intel_add_fb_offsets(int *x, int *y, 2231 const struct intel_plane_state *state, 2232 int plane) 2233 2234 { 2235 const struct intel_framebuffer *intel_fb = to_intel_framebuffer(state->base.fb); 2236 unsigned int rotation = state->base.rotation; 2237 2238 if (drm_rotation_90_or_270(rotation)) { 2239 *x += intel_fb->rotated[plane].x; 2240 *y += intel_fb->rotated[plane].y; 2241 } else { 2242 *x += intel_fb->normal[plane].x; 2243 *y += intel_fb->normal[plane].y; 2244 } 2245 } 2246 2247 /* 2248 * Input tile dimensions and pitch must already be 2249 * rotated to match x and y, and in pixel units. 2250 */ 2251 static u32 _intel_adjust_tile_offset(int *x, int *y, 2252 unsigned int tile_width, 2253 unsigned int tile_height, 2254 unsigned int tile_size, 2255 unsigned int pitch_tiles, 2256 u32 old_offset, 2257 u32 new_offset) 2258 { 2259 unsigned int pitch_pixels = pitch_tiles * tile_width; 2260 unsigned int tiles; 2261 2262 WARN_ON(old_offset & (tile_size - 1)); 2263 WARN_ON(new_offset & (tile_size - 1)); 2264 WARN_ON(new_offset > old_offset); 2265 2266 tiles = (old_offset - new_offset) / tile_size; 2267 2268 *y += tiles / pitch_tiles * tile_height; 2269 *x += tiles % pitch_tiles * tile_width; 2270 2271 /* minimize x in case it got needlessly big */ 2272 *y += *x / pitch_pixels * tile_height; 2273 *x %= pitch_pixels; 2274 2275 return new_offset; 2276 } 2277 2278 /* 2279 * Adjust the tile offset by moving the difference into 2280 * the x/y offsets. 2281 */ 2282 static u32 intel_adjust_tile_offset(int *x, int *y, 2283 const struct intel_plane_state *state, int plane, 2284 u32 old_offset, u32 new_offset) 2285 { 2286 const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); 2287 const struct drm_framebuffer *fb = state->base.fb; 2288 unsigned int cpp = fb->format->cpp[plane]; 2289 unsigned int rotation = state->base.rotation; 2290 unsigned int pitch = intel_fb_pitch(fb, plane, rotation); 2291 2292 WARN_ON(new_offset > old_offset); 2293 2294 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2295 unsigned int tile_size, tile_width, tile_height; 2296 unsigned int pitch_tiles; 2297 2298 tile_size = intel_tile_size(dev_priv); 2299 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2300 2301 if (drm_rotation_90_or_270(rotation)) { 2302 pitch_tiles = pitch / tile_height; 2303 swap(tile_width, tile_height); 2304 } else { 2305 pitch_tiles = pitch / (tile_width * cpp); 2306 } 2307 2308 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2309 tile_size, pitch_tiles, 2310 old_offset, new_offset); 2311 } else { 2312 old_offset += *y * pitch + *x * cpp; 2313 2314 *y = (old_offset - new_offset) / pitch; 2315 *x = ((old_offset - new_offset) - *y * pitch) / cpp; 2316 } 2317 2318 return new_offset; 2319 } 2320 2321 /* 2322 * Computes the linear offset to the base tile and adjusts 2323 * x, y. bytes per pixel is assumed to be a power-of-two. 
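* For tiled buffers the offset is computed in whole tiles, aligned down to the requested alignment, and the remainder is pushed back into the x/y offsets.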
2324 * 2325 * In the 90/270 rotated case, x and y are assumed 2326 * to be already rotated to match the rotated GTT view, and 2327 * pitch is the tile_height aligned framebuffer height. 2328 * 2329 * This function is used when computing the derived information 2330 * under intel_framebuffer, so using any of that information 2331 * here is not allowed. Anything under drm_framebuffer can be 2332 * used. This is why the user has to pass in the pitch since it 2333 * is specified in the rotated orientation. 2334 */ 2335 static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, 2336 int *x, int *y, 2337 const struct drm_framebuffer *fb, int plane, 2338 unsigned int pitch, 2339 unsigned int rotation, 2340 u32 alignment) 2341 { 2342 uint64_t fb_modifier = fb->modifier; 2343 unsigned int cpp = fb->format->cpp[plane]; 2344 u32 offset, offset_aligned; 2345 2346 if (alignment) 2347 alignment--; 2348 2349 if (fb_modifier != DRM_FORMAT_MOD_LINEAR) { 2350 unsigned int tile_size, tile_width, tile_height; 2351 unsigned int tile_rows, tiles, pitch_tiles; 2352 2353 tile_size = intel_tile_size(dev_priv); 2354 intel_tile_dims(fb, plane, &tile_width, &tile_height); 2355 2356 if (drm_rotation_90_or_270(rotation)) { 2357 pitch_tiles = pitch / tile_height; 2358 swap(tile_width, tile_height); 2359 } else { 2360 pitch_tiles = pitch / (tile_width * cpp); 2361 } 2362 2363 tile_rows = *y / tile_height; 2364 *y %= tile_height; 2365 2366 tiles = *x / tile_width; 2367 *x %= tile_width; 2368 2369 offset = (tile_rows * pitch_tiles + tiles) * tile_size; 2370 offset_aligned = offset & ~alignment; 2371 2372 _intel_adjust_tile_offset(x, y, tile_width, tile_height, 2373 tile_size, pitch_tiles, 2374 offset, offset_aligned); 2375 } else { 2376 offset = *y * pitch + *x * cpp; 2377 offset_aligned = offset & ~alignment; 2378 2379 *y = (offset & alignment) / pitch; 2380 *x = ((offset & alignment) - *y * pitch) / cpp; 2381 } 2382 2383 return offset_aligned; 2384 } 2385 2386 u32 intel_compute_tile_offset(int *x, int *y, 2387 const struct intel_plane_state *state, 2388 int plane) 2389 { 2390 const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); 2391 const struct drm_framebuffer *fb = state->base.fb; 2392 unsigned int rotation = state->base.rotation; 2393 int pitch = intel_fb_pitch(fb, plane, rotation); 2394 u32 alignment = intel_surf_alignment(fb, plane); 2395 2396 return _intel_compute_tile_offset(dev_priv, x, y, fb, plane, pitch, 2397 rotation, alignment); 2398 } 2399 2400 /* Convert the fb->offset[] linear offset into x/y offsets */ 2401 static void intel_fb_offset_to_xy(int *x, int *y, 2402 const struct drm_framebuffer *fb, int plane) 2403 { 2404 unsigned int cpp = fb->format->cpp[plane]; 2405 unsigned int pitch = fb->pitches[plane]; 2406 u32 linear_offset = fb->offsets[plane]; 2407 2408 *y = linear_offset / pitch; 2409 *x = linear_offset % pitch / cpp; 2410 } 2411 2412 static unsigned int intel_fb_modifier_to_tiling(uint64_t fb_modifier) 2413 { 2414 switch (fb_modifier) { 2415 case I915_FORMAT_MOD_X_TILED: 2416 return I915_TILING_X; 2417 case I915_FORMAT_MOD_Y_TILED: 2418 return I915_TILING_Y; 2419 default: 2420 return I915_TILING_NONE; 2421 } 2422 } 2423 2424 static int 2425 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2426 struct drm_framebuffer *fb) 2427 { 2428 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 2429 struct intel_rotation_info *rot_info = &intel_fb->rot_info; 2430 u32 gtt_offset_rotated = 0; 2431 unsigned int max_size = 0; 2432 int i, num_planes = 
fb->format->num_planes; 2433 unsigned int tile_size = intel_tile_size(dev_priv); 2434 2435 for (i = 0; i < num_planes; i++) { 2436 unsigned int width, height; 2437 unsigned int cpp, size; 2438 u32 offset; 2439 int x, y; 2440 2441 cpp = fb->format->cpp[i]; 2442 width = drm_framebuffer_plane_width(fb->width, fb, i); 2443 height = drm_framebuffer_plane_height(fb->height, fb, i); 2444 2445 intel_fb_offset_to_xy(&x, &y, fb, i); 2446 2447 /* 2448 * The fence (if used) is aligned to the start of the object 2449 * so having the framebuffer wrap around across the edge of the 2450 * fenced region doesn't really work. We have no API to configure 2451 * the fence start offset within the object (nor could we probably 2452 * on gen2/3). So it's just easier if we just require that the 2453 * fb layout agrees with the fence layout. We already check that the 2454 * fb stride matches the fence stride elsewhere. 2455 */ 2456 if (i915_gem_object_is_tiled(intel_fb->obj) && 2457 (x + width) * cpp > fb->pitches[i]) { 2458 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n", 2459 i, fb->offsets[i]); 2460 return -EINVAL; 2461 } 2462 2463 /* 2464 * First pixel of the framebuffer from 2465 * the start of the normal gtt mapping. 2466 */ 2467 intel_fb->normal[i].x = x; 2468 intel_fb->normal[i].y = y; 2469 2470 offset = _intel_compute_tile_offset(dev_priv, &x, &y, 2471 fb, i, fb->pitches[i], 2472 DRM_ROTATE_0, tile_size); 2473 offset /= tile_size; 2474 2475 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 2476 unsigned int tile_width, tile_height; 2477 unsigned int pitch_tiles; 2478 struct drm_rect r; 2479 2480 intel_tile_dims(fb, i, &tile_width, &tile_height); 2481 2482 rot_info->plane[i].offset = offset; 2483 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp); 2484 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width); 2485 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height); 2486 2487 intel_fb->rotated[i].pitch = 2488 rot_info->plane[i].height * tile_height; 2489 2490 /* how many tiles does this plane need */ 2491 size = rot_info->plane[i].stride * rot_info->plane[i].height; 2492 /* 2493 * If the plane isn't horizontally tile aligned, 2494 * we need one more tile. 2495 */ 2496 if (x != 0) 2497 size++; 2498 2499 /* rotate the x/y offsets to match the GTT view */ 2500 r.x1 = x; 2501 r.y1 = y; 2502 r.x2 = x + width; 2503 r.y2 = y + height; 2504 drm_rect_rotate(&r, 2505 rot_info->plane[i].width * tile_width, 2506 rot_info->plane[i].height * tile_height, 2507 DRM_ROTATE_270); 2508 x = r.x1; 2509 y = r.y1; 2510 2511 /* rotate the tile dimensions to match the GTT view */ 2512 pitch_tiles = intel_fb->rotated[i].pitch / tile_height; 2513 swap(tile_width, tile_height); 2514 2515 /* 2516 * We only keep the x/y offsets, so push all of the 2517 * gtt offset into the x/y offsets. 2518 */ 2519 _intel_adjust_tile_offset(&x, &y, 2520 tile_width, tile_height, 2521 tile_size, pitch_tiles, 2522 gtt_offset_rotated * tile_size, 0); 2523 2524 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height; 2525 2526 /* 2527 * First pixel of the framebuffer from 2528 * the start of the rotated gtt mapping. 
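* (the rotated counterpart of the normal[] offsets stored above)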
2529 */ 2530 intel_fb->rotated[i].x = x; 2531 intel_fb->rotated[i].y = y; 2532 } else { 2533 size = DIV_ROUND_UP((y + height) * fb->pitches[i] + 2534 x * cpp, tile_size); 2535 } 2536 2537 /* how many tiles in total needed in the bo */ 2538 max_size = max(max_size, offset + size); 2539 } 2540 2541 if (max_size * tile_size > intel_fb->obj->base.size) { 2542 DRM_DEBUG_KMS("fb too big for bo (need %u bytes, have %zu bytes)\n", 2543 max_size * tile_size, intel_fb->obj->base.size); 2544 return -EINVAL; 2545 } 2546 2547 return 0; 2548 } 2549 2550 static int i9xx_format_to_fourcc(int format) 2551 { 2552 switch (format) { 2553 case DISPPLANE_8BPP: 2554 return DRM_FORMAT_C8; 2555 case DISPPLANE_BGRX555: 2556 return DRM_FORMAT_XRGB1555; 2557 case DISPPLANE_BGRX565: 2558 return DRM_FORMAT_RGB565; 2559 default: 2560 case DISPPLANE_BGRX888: 2561 return DRM_FORMAT_XRGB8888; 2562 case DISPPLANE_RGBX888: 2563 return DRM_FORMAT_XBGR8888; 2564 case DISPPLANE_BGRX101010: 2565 return DRM_FORMAT_XRGB2101010; 2566 case DISPPLANE_RGBX101010: 2567 return DRM_FORMAT_XBGR2101010; 2568 } 2569 } 2570 2571 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) 2572 { 2573 switch (format) { 2574 case PLANE_CTL_FORMAT_RGB_565: 2575 return DRM_FORMAT_RGB565; 2576 default: 2577 case PLANE_CTL_FORMAT_XRGB_8888: 2578 if (rgb_order) { 2579 if (alpha) 2580 return DRM_FORMAT_ABGR8888; 2581 else 2582 return DRM_FORMAT_XBGR8888; 2583 } else { 2584 if (alpha) 2585 return DRM_FORMAT_ARGB8888; 2586 else 2587 return DRM_FORMAT_XRGB8888; 2588 } 2589 case PLANE_CTL_FORMAT_XRGB_2101010: 2590 if (rgb_order) 2591 return DRM_FORMAT_XBGR2101010; 2592 else 2593 return DRM_FORMAT_XRGB2101010; 2594 } 2595 } 2596 2597 static bool 2598 intel_alloc_initial_plane_obj(struct intel_crtc *crtc, 2599 struct intel_initial_plane_config *plane_config) 2600 { 2601 struct drm_device *dev = crtc->base.dev; 2602 struct drm_i915_private *dev_priv = to_i915(dev); 2603 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2604 struct drm_i915_gem_object *obj = NULL; 2605 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2606 struct drm_framebuffer *fb = &plane_config->fb->base; 2607 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); 2608 u32 size_aligned = round_up(plane_config->base + plane_config->size, 2609 PAGE_SIZE); 2610 2611 size_aligned -= base_aligned; 2612 2613 if (plane_config->size == 0) 2614 return false; 2615 2616 /* If the FB is too big, just don't use it since fbdev is not very 2617 * important and we should probably use that space with FBC or other 2618 * features. 
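* Only reuse the BIOS fb if it occupies at most half of the usable stolen space.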
*/ 2619 if (size_aligned * 2 > ggtt->stolen_usable_size) 2620 return false; 2621 2622 mutex_lock(&dev->struct_mutex); 2623 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, 2624 base_aligned, 2625 base_aligned, 2626 size_aligned); 2627 mutex_unlock(&dev->struct_mutex); 2628 if (!obj) 2629 return false; 2630 2631 if (plane_config->tiling == I915_TILING_X) 2632 obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; 2633 2634 mode_cmd.pixel_format = fb->format->format; 2635 mode_cmd.width = fb->width; 2636 mode_cmd.height = fb->height; 2637 mode_cmd.pitches[0] = fb->pitches[0]; 2638 mode_cmd.modifier[0] = fb->modifier; 2639 mode_cmd.flags = DRM_MODE_FB_MODIFIERS; 2640 2641 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) { 2642 DRM_DEBUG_KMS("intel fb init failed\n"); 2643 goto out_unref_obj; 2644 } 2645 2646 2647 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); 2648 return true; 2649 2650 out_unref_obj: 2651 i915_gem_object_put(obj); 2652 return false; 2653 } 2654 2655 /* Update plane->state->fb to match plane->fb after driver-internal updates */ 2656 static void 2657 update_state_fb(struct drm_plane *plane) 2658 { 2659 if (plane->fb == plane->state->fb) 2660 return; 2661 2662 if (plane->state->fb) 2663 drm_framebuffer_unreference(plane->state->fb); 2664 plane->state->fb = plane->fb; 2665 if (plane->state->fb) 2666 drm_framebuffer_reference(plane->state->fb); 2667 } 2668 2669 static void 2670 intel_set_plane_visible(struct intel_crtc_state *crtc_state, 2671 struct intel_plane_state *plane_state, 2672 bool visible) 2673 { 2674 struct intel_plane *plane = to_intel_plane(plane_state->base.plane); 2675 2676 plane_state->base.visible = visible; 2677 2678 /* FIXME pre-g4x don't work like this */ 2679 if (visible) { 2680 crtc_state->base.plane_mask |= BIT(drm_plane_index(&plane->base)); 2681 crtc_state->active_planes |= BIT(plane->id); 2682 } else { 2683 crtc_state->base.plane_mask &= ~BIT(drm_plane_index(&plane->base)); 2684 crtc_state->active_planes &= ~BIT(plane->id); 2685 } 2686 2687 DRM_DEBUG_KMS("%s active planes 0x%x\n", 2688 crtc_state->base.crtc->name, 2689 crtc_state->active_planes); 2690 } 2691 2692 static void 2693 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, 2694 struct intel_initial_plane_config *plane_config) 2695 { 2696 struct drm_device *dev = intel_crtc->base.dev; 2697 struct drm_i915_private *dev_priv = to_i915(dev); 2698 struct drm_crtc *c; 2699 struct drm_i915_gem_object *obj; 2700 struct drm_plane *primary = intel_crtc->base.primary; 2701 struct drm_plane_state *plane_state = primary->state; 2702 struct drm_crtc_state *crtc_state = intel_crtc->base.state; 2703 struct intel_plane *intel_plane = to_intel_plane(primary); 2704 struct intel_plane_state *intel_state = 2705 to_intel_plane_state(plane_state); 2706 struct drm_framebuffer *fb; 2707 2708 if (!plane_config->fb) 2709 return; 2710 2711 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { 2712 fb = &plane_config->fb->base; 2713 goto valid_fb; 2714 } 2715 2716 kfree(plane_config->fb); 2717 2718 /* 2719 * Failed to alloc the obj, check to see if we should share 2720 * an fb with another CRTC instead 2721 */ 2722 for_each_crtc(dev, c) { 2723 struct intel_plane_state *state; 2724 2725 if (c == &intel_crtc->base) 2726 continue; 2727 2728 if (!to_intel_crtc(c)->active) 2729 continue; 2730 2731 state = to_intel_plane_state(c->primary->state); 2732 if (!state->vma) 2733 continue; 2734 2735 if (intel_plane_ggtt_offset(state) == plane_config->base) { 2736 fb = 
c->primary->fb; 2737 drm_framebuffer_reference(fb); 2738 goto valid_fb; 2739 } 2740 } 2741 2742 /* 2743 * We've failed to reconstruct the BIOS FB. Current display state 2744 * indicates that the primary plane is visible, but has a NULL FB, 2745 * which will lead to problems later if we don't fix it up. The 2746 * simplest solution is to just disable the primary plane now and 2747 * pretend the BIOS never had it enabled. 2748 */ 2749 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2750 to_intel_plane_state(plane_state), 2751 false); 2752 intel_pre_disable_primary_noatomic(&intel_crtc->base); 2753 trace_intel_disable_plane(primary, intel_crtc); 2754 intel_plane->disable_plane(primary, &intel_crtc->base); 2755 2756 return; 2757 2758 valid_fb: 2759 mutex_lock(&dev->struct_mutex); 2760 intel_state->vma = 2761 intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2762 mutex_unlock(&dev->struct_mutex); 2763 if (IS_ERR(intel_state->vma)) { 2764 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", 2765 intel_crtc->pipe, PTR_ERR(intel_state->vma)); 2766 2767 intel_state->vma = NULL; 2768 drm_framebuffer_unreference(fb); 2769 return; 2770 } 2771 2772 plane_state->src_x = 0; 2773 plane_state->src_y = 0; 2774 plane_state->src_w = fb->width << 16; 2775 plane_state->src_h = fb->height << 16; 2776 2777 plane_state->crtc_x = 0; 2778 plane_state->crtc_y = 0; 2779 plane_state->crtc_w = fb->width; 2780 plane_state->crtc_h = fb->height; 2781 2782 intel_state->base.src = drm_plane_state_src(plane_state); 2783 intel_state->base.dst = drm_plane_state_dest(plane_state); 2784 2785 obj = intel_fb_obj(fb); 2786 if (i915_gem_object_is_tiled(obj)) 2787 dev_priv->preserve_bios_swizzle = true; 2788 2789 drm_framebuffer_reference(fb); 2790 primary->fb = primary->state->fb = fb; 2791 primary->crtc = primary->state->crtc = &intel_crtc->base; 2792 2793 intel_set_plane_visible(to_intel_crtc_state(crtc_state), 2794 to_intel_plane_state(plane_state), 2795 true); 2796 2797 atomic_or(to_intel_plane(primary)->frontbuffer_bit, 2798 &obj->frontbuffer_bits); 2799 } 2800 2801 static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, 2802 unsigned int rotation) 2803 { 2804 int cpp = fb->format->cpp[plane]; 2805 2806 switch (fb->modifier) { 2807 case DRM_FORMAT_MOD_LINEAR: 2808 case I915_FORMAT_MOD_X_TILED: 2809 switch (cpp) { 2810 case 8: 2811 return 4096; 2812 case 4: 2813 case 2: 2814 case 1: 2815 return 8192; 2816 default: 2817 MISSING_CASE(cpp); 2818 break; 2819 } 2820 break; 2821 case I915_FORMAT_MOD_Y_TILED: 2822 case I915_FORMAT_MOD_Yf_TILED: 2823 switch (cpp) { 2824 case 8: 2825 return 2048; 2826 case 4: 2827 return 4096; 2828 case 2: 2829 case 1: 2830 return 8192; 2831 default: 2832 MISSING_CASE(cpp); 2833 break; 2834 } 2835 break; 2836 default: 2837 MISSING_CASE(fb->modifier); 2838 } 2839 2840 return 2048; 2841 } 2842 2843 static int skl_check_main_surface(struct intel_plane_state *plane_state) 2844 { 2845 const struct drm_framebuffer *fb = plane_state->base.fb; 2846 unsigned int rotation = plane_state->base.rotation; 2847 int x = plane_state->base.src.x1 >> 16; 2848 int y = plane_state->base.src.y1 >> 16; 2849 int w = drm_rect_width(&plane_state->base.src) >> 16; 2850 int h = drm_rect_height(&plane_state->base.src) >> 16; 2851 int max_width = skl_max_plane_width(fb, 0, rotation); 2852 int max_height = 4096; 2853 u32 alignment, offset, aux_offset = plane_state->aux.offset; 2854 2855 if (w > max_width || h > max_height) { 2856 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit 
%dx%d)\n", 2857 w, h, max_width, max_height); 2858 return -EINVAL; 2859 } 2860 2861 intel_add_fb_offsets(&x, &y, plane_state, 0); 2862 offset = intel_compute_tile_offset(&x, &y, plane_state, 0); 2863 alignment = intel_surf_alignment(fb, 0); 2864 2865 /* 2866 * AUX surface offset is specified as the distance from the 2867 * main surface offset, and it must be non-negative. Make 2868 * sure that is what we will get. 2869 */ 2870 if (offset > aux_offset) 2871 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2872 offset, aux_offset & ~(alignment - 1)); 2873 2874 /* 2875 * When using an X-tiled surface, the plane blows up 2876 * if the x offset + width exceed the stride. 2877 * 2878 * TODO: linear and Y-tiled seem fine, Yf untested, 2879 */ 2880 if (fb->modifier == I915_FORMAT_MOD_X_TILED) { 2881 int cpp = fb->format->cpp[0]; 2882 2883 while ((x + w) * cpp > fb->pitches[0]) { 2884 if (offset == 0) { 2885 DRM_DEBUG_KMS("Unable to find suitable display surface offset\n"); 2886 return -EINVAL; 2887 } 2888 2889 offset = intel_adjust_tile_offset(&x, &y, plane_state, 0, 2890 offset, offset - alignment); 2891 } 2892 } 2893 2894 plane_state->main.offset = offset; 2895 plane_state->main.x = x; 2896 plane_state->main.y = y; 2897 2898 return 0; 2899 } 2900 2901 static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state) 2902 { 2903 const struct drm_framebuffer *fb = plane_state->base.fb; 2904 unsigned int rotation = plane_state->base.rotation; 2905 int max_width = skl_max_plane_width(fb, 1, rotation); 2906 int max_height = 4096; 2907 int x = plane_state->base.src.x1 >> 17; 2908 int y = plane_state->base.src.y1 >> 17; 2909 int w = drm_rect_width(&plane_state->base.src) >> 17; 2910 int h = drm_rect_height(&plane_state->base.src) >> 17; 2911 u32 offset; 2912 2913 intel_add_fb_offsets(&x, &y, plane_state, 1); 2914 offset = intel_compute_tile_offset(&x, &y, plane_state, 1); 2915 2916 /* FIXME not quite sure how/if these apply to the chroma plane */ 2917 if (w > max_width || h > max_height) { 2918 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n", 2919 w, h, max_width, max_height); 2920 return -EINVAL; 2921 } 2922 2923 plane_state->aux.offset = offset; 2924 plane_state->aux.x = x; 2925 plane_state->aux.y = y; 2926 2927 return 0; 2928 } 2929 2930 int skl_check_plane_surface(struct intel_plane_state *plane_state) 2931 { 2932 const struct drm_framebuffer *fb = plane_state->base.fb; 2933 unsigned int rotation = plane_state->base.rotation; 2934 int ret; 2935 2936 if (!plane_state->base.visible) 2937 return 0; 2938 2939 /* Rotate src coordinates to match rotated GTT view */ 2940 if (drm_rotation_90_or_270(rotation)) 2941 drm_rect_rotate(&plane_state->base.src, 2942 fb->width << 16, fb->height << 16, 2943 DRM_ROTATE_270); 2944 2945 /* 2946 * Handle the AUX surface first since 2947 * the main surface setup depends on it. 
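* (skl_check_main_surface() clamps the main surface offset so it never exceeds aux.offset)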
2948 */ 2949 if (fb->format->format == DRM_FORMAT_NV12) { 2950 ret = skl_check_nv12_aux_surface(plane_state); 2951 if (ret) 2952 return ret; 2953 } else { 2954 plane_state->aux.offset = ~0xfff; 2955 plane_state->aux.x = 0; 2956 plane_state->aux.y = 0; 2957 } 2958 2959 ret = skl_check_main_surface(plane_state); 2960 if (ret) 2961 return ret; 2962 2963 return 0; 2964 } 2965 2966 static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, 2967 const struct intel_plane_state *plane_state) 2968 { 2969 struct drm_i915_private *dev_priv = 2970 to_i915(plane_state->base.plane->dev); 2971 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 2972 const struct drm_framebuffer *fb = plane_state->base.fb; 2973 unsigned int rotation = plane_state->base.rotation; 2974 u32 dspcntr; 2975 2976 dspcntr = DISPLAY_PLANE_ENABLE | DISPPLANE_GAMMA_ENABLE; 2977 2978 if (IS_G4X(dev_priv) || IS_GEN5(dev_priv) || 2979 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 2980 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2981 2982 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 2983 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 2984 2985 if (INTEL_GEN(dev_priv) < 4) { 2986 if (crtc->pipe == PIPE_B) 2987 dspcntr |= DISPPLANE_SEL_PIPE_B; 2988 } 2989 2990 switch (fb->format->format) { 2991 case DRM_FORMAT_C8: 2992 dspcntr |= DISPPLANE_8BPP; 2993 break; 2994 case DRM_FORMAT_XRGB1555: 2995 dspcntr |= DISPPLANE_BGRX555; 2996 break; 2997 case DRM_FORMAT_RGB565: 2998 dspcntr |= DISPPLANE_BGRX565; 2999 break; 3000 case DRM_FORMAT_XRGB8888: 3001 dspcntr |= DISPPLANE_BGRX888; 3002 break; 3003 case DRM_FORMAT_XBGR8888: 3004 dspcntr |= DISPPLANE_RGBX888; 3005 break; 3006 case DRM_FORMAT_XRGB2101010: 3007 dspcntr |= DISPPLANE_BGRX101010; 3008 break; 3009 case DRM_FORMAT_XBGR2101010: 3010 dspcntr |= DISPPLANE_RGBX101010; 3011 break; 3012 default: 3013 MISSING_CASE(fb->format->format); 3014 return 0; 3015 } 3016 3017 if (INTEL_GEN(dev_priv) >= 4 && 3018 fb->modifier == I915_FORMAT_MOD_X_TILED) 3019 dspcntr |= DISPPLANE_TILED; 3020 3021 if (rotation & DRM_ROTATE_180) 3022 dspcntr |= DISPPLANE_ROTATE_180; 3023 3024 if (rotation & DRM_REFLECT_X) 3025 dspcntr |= DISPPLANE_MIRROR; 3026 3027 return dspcntr; 3028 } 3029 3030 int i9xx_check_plane_surface(struct intel_plane_state *plane_state) 3031 { 3032 struct drm_i915_private *dev_priv = 3033 to_i915(plane_state->base.plane->dev); 3034 int src_x = plane_state->base.src.x1 >> 16; 3035 int src_y = plane_state->base.src.y1 >> 16; 3036 u32 offset; 3037 3038 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); 3039 3040 if (INTEL_GEN(dev_priv) >= 4) 3041 offset = intel_compute_tile_offset(&src_x, &src_y, 3042 plane_state, 0); 3043 else 3044 offset = 0; 3045 3046 /* HSW/BDW do this automagically in hardware */ 3047 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { 3048 unsigned int rotation = plane_state->base.rotation; 3049 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3050 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3051 3052 if (rotation & DRM_ROTATE_180) { 3053 src_x += src_w - 1; 3054 src_y += src_h - 1; 3055 } else if (rotation & DRM_REFLECT_X) { 3056 src_x += src_w - 1; 3057 } 3058 } 3059 3060 plane_state->main.offset = offset; 3061 plane_state->main.x = src_x; 3062 plane_state->main.y = src_y; 3063 3064 return 0; 3065 } 3066 3067 static void i9xx_update_primary_plane(struct drm_plane *primary, 3068 const struct intel_crtc_state *crtc_state, 3069 const struct intel_plane_state *plane_state) 3070 { 3071 struct drm_i915_private *dev_priv = 
to_i915(primary->dev); 3072 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 3073 struct drm_framebuffer *fb = plane_state->base.fb; 3074 int plane = intel_crtc->plane; 3075 u32 linear_offset; 3076 u32 dspcntr = plane_state->ctl; 3077 i915_reg_t reg = DSPCNTR(plane); 3078 int x = plane_state->main.x; 3079 int y = plane_state->main.y; 3080 unsigned long irqflags; 3081 3082 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); 3083 3084 if (INTEL_GEN(dev_priv) >= 4) 3085 intel_crtc->dspaddr_offset = plane_state->main.offset; 3086 else 3087 intel_crtc->dspaddr_offset = linear_offset; 3088 3089 intel_crtc->adjusted_x = x; 3090 intel_crtc->adjusted_y = y; 3091 3092 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3093 3094 if (INTEL_GEN(dev_priv) < 4) { 3095 /* pipesrc and dspsize control the size that is scaled from, 3096 * which should always be the user's requested size. 3097 */ 3098 I915_WRITE_FW(DSPSIZE(plane), 3099 ((crtc_state->pipe_src_h - 1) << 16) | 3100 (crtc_state->pipe_src_w - 1)); 3101 I915_WRITE_FW(DSPPOS(plane), 0); 3102 } else if (IS_CHERRYVIEW(dev_priv) && plane == PLANE_B) { 3103 I915_WRITE_FW(PRIMSIZE(plane), 3104 ((crtc_state->pipe_src_h - 1) << 16) | 3105 (crtc_state->pipe_src_w - 1)); 3106 I915_WRITE_FW(PRIMPOS(plane), 0); 3107 I915_WRITE_FW(PRIMCNSTALPHA(plane), 0); 3108 } 3109 3110 I915_WRITE_FW(reg, dspcntr); 3111 3112 I915_WRITE_FW(DSPSTRIDE(plane), fb->pitches[0]); 3113 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 3114 I915_WRITE_FW(DSPSURF(plane), 3115 intel_plane_ggtt_offset(plane_state) + 3116 intel_crtc->dspaddr_offset); 3117 I915_WRITE_FW(DSPOFFSET(plane), (y << 16) | x); 3118 } else if (INTEL_GEN(dev_priv) >= 4) { 3119 I915_WRITE_FW(DSPSURF(plane), 3120 intel_plane_ggtt_offset(plane_state) + 3121 intel_crtc->dspaddr_offset); 3122 I915_WRITE_FW(DSPTILEOFF(plane), (y << 16) | x); 3123 I915_WRITE_FW(DSPLINOFF(plane), linear_offset); 3124 } else { 3125 I915_WRITE_FW(DSPADDR(plane), 3126 intel_plane_ggtt_offset(plane_state) + 3127 intel_crtc->dspaddr_offset); 3128 } 3129 POSTING_READ_FW(reg); 3130 3131 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3132 } 3133 3134 static void i9xx_disable_primary_plane(struct drm_plane *primary, 3135 struct drm_crtc *crtc) 3136 { 3137 struct drm_device *dev = crtc->dev; 3138 struct drm_i915_private *dev_priv = to_i915(dev); 3139 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3140 int plane = intel_crtc->plane; 3141 unsigned long irqflags; 3142 3143 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3144 3145 I915_WRITE_FW(DSPCNTR(plane), 0); 3146 if (INTEL_INFO(dev_priv)->gen >= 4) 3147 I915_WRITE_FW(DSPSURF(plane), 0); 3148 else 3149 I915_WRITE_FW(DSPADDR(plane), 0); 3150 POSTING_READ_FW(DSPCNTR(plane)); 3151 3152 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3153 } 3154 3155 static u32 3156 intel_fb_stride_alignment(const struct drm_framebuffer *fb, int plane) 3157 { 3158 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) 3159 return 64; 3160 else 3161 return intel_tile_width_bytes(fb, plane); 3162 } 3163 3164 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 3165 { 3166 struct drm_device *dev = intel_crtc->base.dev; 3167 struct drm_i915_private *dev_priv = to_i915(dev); 3168 3169 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 3170 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 3171 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0); 3172 } 3173 3174 /* 3175 * This function detaches (aka. 
unbinds) unused scalers in hardware 3176 */ 3177 static void skl_detach_scalers(struct intel_crtc *intel_crtc) 3178 { 3179 struct intel_crtc_scaler_state *scaler_state; 3180 int i; 3181 3182 scaler_state = &intel_crtc->config->scaler_state; 3183 3184 /* loop through and disable scalers that aren't in use */ 3185 for (i = 0; i < intel_crtc->num_scalers; i++) { 3186 if (!scaler_state->scalers[i].in_use) 3187 skl_detach_scaler(intel_crtc, i); 3188 } 3189 } 3190 3191 u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, 3192 unsigned int rotation) 3193 { 3194 u32 stride; 3195 3196 if (plane >= fb->format->num_planes) 3197 return 0; 3198 3199 stride = intel_fb_pitch(fb, plane, rotation); 3200 3201 /* 3202 * The stride is either expressed as a multiple of 64 bytes chunks for 3203 * linear buffers or in number of tiles for tiled buffers. 3204 */ 3205 if (drm_rotation_90_or_270(rotation)) 3206 stride /= intel_tile_height(fb, plane); 3207 else 3208 stride /= intel_fb_stride_alignment(fb, plane); 3209 3210 return stride; 3211 } 3212 3213 static u32 skl_plane_ctl_format(uint32_t pixel_format) 3214 { 3215 switch (pixel_format) { 3216 case DRM_FORMAT_C8: 3217 return PLANE_CTL_FORMAT_INDEXED; 3218 case DRM_FORMAT_RGB565: 3219 return PLANE_CTL_FORMAT_RGB_565; 3220 case DRM_FORMAT_XBGR8888: 3221 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX; 3222 case DRM_FORMAT_XRGB8888: 3223 return PLANE_CTL_FORMAT_XRGB_8888; 3224 /* 3225 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers 3226 * to be already pre-multiplied. We need to add a knob (or a different 3227 * DRM_FORMAT) for user-space to configure that. 3228 */ 3229 case DRM_FORMAT_ABGR8888: 3230 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX | 3231 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3232 case DRM_FORMAT_ARGB8888: 3233 return PLANE_CTL_FORMAT_XRGB_8888 | 3234 PLANE_CTL_ALPHA_SW_PREMULTIPLY; 3235 case DRM_FORMAT_XRGB2101010: 3236 return PLANE_CTL_FORMAT_XRGB_2101010; 3237 case DRM_FORMAT_XBGR2101010: 3238 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010; 3239 case DRM_FORMAT_YUYV: 3240 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV; 3241 case DRM_FORMAT_YVYU: 3242 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU; 3243 case DRM_FORMAT_UYVY: 3244 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY; 3245 case DRM_FORMAT_VYUY: 3246 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY; 3247 default: 3248 MISSING_CASE(pixel_format); 3249 } 3250 3251 return 0; 3252 } 3253 3254 static u32 skl_plane_ctl_tiling(uint64_t fb_modifier) 3255 { 3256 switch (fb_modifier) { 3257 case DRM_FORMAT_MOD_LINEAR: 3258 break; 3259 case I915_FORMAT_MOD_X_TILED: 3260 return PLANE_CTL_TILED_X; 3261 case I915_FORMAT_MOD_Y_TILED: 3262 return PLANE_CTL_TILED_Y; 3263 case I915_FORMAT_MOD_Yf_TILED: 3264 return PLANE_CTL_TILED_YF; 3265 default: 3266 MISSING_CASE(fb_modifier); 3267 } 3268 3269 return 0; 3270 } 3271 3272 static u32 skl_plane_ctl_rotation(unsigned int rotation) 3273 { 3274 switch (rotation) { 3275 case DRM_ROTATE_0: 3276 break; 3277 /* 3278 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr 3279 * while i915 HW rotation is clockwise, thats why this swapping. 
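* e.g. a DRM_ROTATE_90 (counter-clockwise) request is programmed as PLANE_CTL_ROTATE_270 in hardware.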
3280 */ 3281 case DRM_ROTATE_90: 3282 return PLANE_CTL_ROTATE_270; 3283 case DRM_ROTATE_180: 3284 return PLANE_CTL_ROTATE_180; 3285 case DRM_ROTATE_270: 3286 return PLANE_CTL_ROTATE_90; 3287 default: 3288 MISSING_CASE(rotation); 3289 } 3290 3291 return 0; 3292 } 3293 3294 u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, 3295 const struct intel_plane_state *plane_state) 3296 { 3297 struct drm_i915_private *dev_priv = 3298 to_i915(plane_state->base.plane->dev); 3299 const struct drm_framebuffer *fb = plane_state->base.fb; 3300 unsigned int rotation = plane_state->base.rotation; 3301 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; 3302 u32 plane_ctl; 3303 3304 plane_ctl = PLANE_CTL_ENABLE; 3305 3306 if (!IS_GEMINILAKE(dev_priv)) { 3307 plane_ctl |= 3308 PLANE_CTL_PIPE_GAMMA_ENABLE | 3309 PLANE_CTL_PIPE_CSC_ENABLE | 3310 PLANE_CTL_PLANE_GAMMA_DISABLE; 3311 } 3312 3313 plane_ctl |= skl_plane_ctl_format(fb->format->format); 3314 plane_ctl |= skl_plane_ctl_tiling(fb->modifier); 3315 plane_ctl |= skl_plane_ctl_rotation(rotation); 3316 3317 if (key->flags & I915_SET_COLORKEY_DESTINATION) 3318 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; 3319 else if (key->flags & I915_SET_COLORKEY_SOURCE) 3320 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; 3321 3322 return plane_ctl; 3323 } 3324 3325 static void skylake_update_primary_plane(struct drm_plane *plane, 3326 const struct intel_crtc_state *crtc_state, 3327 const struct intel_plane_state *plane_state) 3328 { 3329 struct drm_device *dev = plane->dev; 3330 struct drm_i915_private *dev_priv = to_i915(dev); 3331 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 3332 struct drm_framebuffer *fb = plane_state->base.fb; 3333 enum plane_id plane_id = to_intel_plane(plane)->id; 3334 enum i915_pipe pipe = to_intel_plane(plane)->pipe; 3335 u32 plane_ctl = plane_state->ctl; 3336 unsigned int rotation = plane_state->base.rotation; 3337 u32 stride = skl_plane_stride(fb, 0, rotation); 3338 u32 surf_addr = plane_state->main.offset; 3339 int scaler_id = plane_state->scaler_id; 3340 int src_x = plane_state->main.x; 3341 int src_y = plane_state->main.y; 3342 int src_w = drm_rect_width(&plane_state->base.src) >> 16; 3343 int src_h = drm_rect_height(&plane_state->base.src) >> 16; 3344 int dst_x = plane_state->base.dst.x1; 3345 int dst_y = plane_state->base.dst.y1; 3346 int dst_w = drm_rect_width(&plane_state->base.dst); 3347 int dst_h = drm_rect_height(&plane_state->base.dst); 3348 unsigned long irqflags; 3349 3350 /* Sizes are 0 based */ 3351 src_w--; 3352 src_h--; 3353 dst_w--; 3354 dst_h--; 3355 3356 intel_crtc->dspaddr_offset = surf_addr; 3357 3358 intel_crtc->adjusted_x = src_x; 3359 intel_crtc->adjusted_y = src_y; 3360 3361 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3362 3363 if (IS_GEMINILAKE(dev_priv)) { 3364 I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), 3365 PLANE_COLOR_PIPE_GAMMA_ENABLE | 3366 PLANE_COLOR_PIPE_CSC_ENABLE | 3367 PLANE_COLOR_PLANE_GAMMA_DISABLE); 3368 } 3369 3370 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl); 3371 I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (src_y << 16) | src_x); 3372 I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride); 3373 I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w); 3374 3375 if (scaler_id >= 0) { 3376 uint32_t ps_ctrl = 0; 3377 3378 WARN_ON(!dst_w || !dst_h); 3379 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane_id) | 3380 crtc_state->scaler_state.scalers[scaler_id].mode; 3381 I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 3382 
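/* the scaler window carries the plane position/size; PLANE_POS stays 0 below */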
I915_WRITE_FW(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 3383 I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y); 3384 I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h); 3385 I915_WRITE_FW(PLANE_POS(pipe, plane_id), 0); 3386 } else { 3387 I915_WRITE_FW(PLANE_POS(pipe, plane_id), (dst_y << 16) | dst_x); 3388 } 3389 3390 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 3391 intel_plane_ggtt_offset(plane_state) + surf_addr); 3392 3393 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3394 3395 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3396 } 3397 3398 static void skylake_disable_primary_plane(struct drm_plane *primary, 3399 struct drm_crtc *crtc) 3400 { 3401 struct drm_device *dev = crtc->dev; 3402 struct drm_i915_private *dev_priv = to_i915(dev); 3403 enum plane_id plane_id = to_intel_plane(primary)->id; 3404 enum i915_pipe pipe = to_intel_plane(primary)->pipe; 3405 unsigned long irqflags; 3406 3407 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 3408 3409 I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0); 3410 I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0); 3411 POSTING_READ_FW(PLANE_SURF(pipe, plane_id)); 3412 3413 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 3414 } 3415 3416 static void intel_complete_page_flips(struct drm_i915_private *dev_priv) 3417 { 3418 struct intel_crtc *crtc; 3419 3420 for_each_intel_crtc(&dev_priv->drm, crtc) 3421 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3422 } 3423 3424 static void intel_update_primary_planes(struct drm_device *dev) 3425 { 3426 struct drm_crtc *crtc; 3427 3428 for_each_crtc(dev, crtc) { 3429 struct intel_plane *plane = to_intel_plane(crtc->primary); 3430 struct intel_plane_state *plane_state = 3431 to_intel_plane_state(plane->base.state); 3432 3433 if (plane_state->base.visible) { 3434 trace_intel_update_plane(&plane->base, 3435 to_intel_crtc(crtc)); 3436 3437 plane->update_plane(&plane->base, 3438 to_intel_crtc_state(crtc->state), 3439 plane_state); 3440 } 3441 } 3442 } 3443 3444 static int 3445 __intel_display_resume(struct drm_device *dev, 3446 struct drm_atomic_state *state, 3447 struct drm_modeset_acquire_ctx *ctx) 3448 { 3449 struct drm_crtc_state *crtc_state; 3450 struct drm_crtc *crtc; 3451 int i, ret; 3452 3453 intel_modeset_setup_hw_state(dev, ctx); 3454 i915_redisable_vga(to_i915(dev)); 3455 3456 if (!state) 3457 return 0; 3458 3459 /* 3460 * We've duplicated the state, pointers to the old state are invalid. 3461 * 3462 * Don't attempt to use the old state until we commit the duplicated state. 3463 */ 3464 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 3465 /* 3466 * Force recalculation even if we restore 3467 * current state. With fast modeset this may not result 3468 * in a modeset when the state is compatible. 
3469 */ 3470 crtc_state->mode_changed = true; 3471 } 3472 3473 /* ignore any reset values/BIOS leftovers in the WM registers */ 3474 if (!HAS_GMCH_DISPLAY(to_i915(dev))) 3475 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3476 3477 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 3478 3479 WARN_ON(ret == -EDEADLK); 3480 return ret; 3481 } 3482 3483 static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) 3484 { 3485 return intel_has_gpu_reset(dev_priv) && 3486 INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv); 3487 } 3488 3489 void intel_prepare_reset(struct drm_i915_private *dev_priv) 3490 { 3491 struct drm_device *dev = &dev_priv->drm; 3492 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3493 struct drm_atomic_state *state; 3494 int ret; 3495 3496 /* 3497 * Need mode_config.mutex so that we don't 3498 * trample ongoing ->detect() and whatnot. 3499 */ 3500 mutex_lock(&dev->mode_config.mutex); 3501 drm_modeset_acquire_init(ctx, 0); 3502 while (1) { 3503 ret = drm_modeset_lock_all_ctx(dev, ctx); 3504 if (ret != -EDEADLK) 3505 break; 3506 3507 drm_modeset_backoff(ctx); 3508 } 3509 3510 /* reset doesn't touch the display, but flips might get nuked anyway, */ 3511 if (!i915.force_reset_modeset_test && 3512 !gpu_reset_clobbers_display(dev_priv)) 3513 return; 3514 3515 /* 3516 * Disabling the crtcs gracefully seems nicer. Also the 3517 * g33 docs say we should at least disable all the planes. 3518 */ 3519 state = drm_atomic_helper_duplicate_state(dev, ctx); 3520 if (IS_ERR(state)) { 3521 ret = PTR_ERR(state); 3522 DRM_ERROR("Duplicating state failed with %i\n", ret); 3523 return; 3524 } 3525 3526 ret = drm_atomic_helper_disable_all(dev, ctx); 3527 if (ret) { 3528 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 3529 drm_atomic_state_put(state); 3530 return; 3531 } 3532 3533 dev_priv->modeset_restore_state = state; 3534 state->acquire_ctx = ctx; 3535 } 3536 3537 void intel_finish_reset(struct drm_i915_private *dev_priv) 3538 { 3539 struct drm_device *dev = &dev_priv->drm; 3540 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3541 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3542 int ret; 3543 3544 /* 3545 * Flips in the rings will be nuked by the reset, 3546 * so complete all pending flips so that user space 3547 * will get its events and not get stuck. 3548 */ 3549 intel_complete_page_flips(dev_priv); 3550 3551 dev_priv->modeset_restore_state = NULL; 3552 3553 /* reset doesn't touch the display */ 3554 if (!gpu_reset_clobbers_display(dev_priv)) { 3555 if (!state) { 3556 /* 3557 * Flips in the rings have been nuked by the reset, 3558 * so update the base address of all primary 3559 * planes to the the last fb to make sure we're 3560 * showing the correct fb after a reset. 3561 * 3562 * FIXME: Atomic will make this obsolete since we won't schedule 3563 * CS-based flips (which might get lost in gpu resets) any more. 3564 */ 3565 intel_update_primary_planes(dev); 3566 } else { 3567 ret = __intel_display_resume(dev, state, ctx); 3568 if (ret) 3569 DRM_ERROR("Restoring old state failed with %i\n", ret); 3570 } 3571 } else { 3572 /* 3573 * The display has been reset as well, 3574 * so need a full re-initialization. 
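* (interrupts, the PPS workaround registers, hw state and HPD are all brought back up below)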
3575 */ 3576 intel_runtime_pm_disable_interrupts(dev_priv); 3577 intel_runtime_pm_enable_interrupts(dev_priv); 3578 3579 intel_pps_unlock_regs_wa(dev_priv); 3580 intel_modeset_init_hw(dev); 3581 3582 spin_lock_irq(&dev_priv->irq_lock); 3583 if (dev_priv->display.hpd_irq_setup) 3584 dev_priv->display.hpd_irq_setup(dev_priv); 3585 spin_unlock_irq(&dev_priv->irq_lock); 3586 3587 ret = __intel_display_resume(dev, state, ctx); 3588 if (ret) 3589 DRM_ERROR("Restoring old state failed with %i\n", ret); 3590 3591 intel_hpd_init(dev_priv); 3592 } 3593 3594 if (state) 3595 drm_atomic_state_put(state); 3596 drm_modeset_drop_locks(ctx); 3597 drm_modeset_acquire_fini(ctx); 3598 mutex_unlock(&dev->mode_config.mutex); 3599 } 3600 3601 static bool abort_flip_on_reset(struct intel_crtc *crtc) 3602 { 3603 struct i915_gpu_error *error = &to_i915(crtc->base.dev)->gpu_error; 3604 3605 if (i915_reset_backoff(error)) 3606 return true; 3607 3608 if (crtc->reset_count != i915_reset_count(error)) 3609 return true; 3610 3611 return false; 3612 } 3613 3614 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3615 { 3616 struct drm_device *dev = crtc->dev; 3617 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3618 bool pending; 3619 3620 if (abort_flip_on_reset(intel_crtc)) 3621 return false; 3622 3623 spin_lock_irq(&dev->event_lock); 3624 pending = to_intel_crtc(crtc)->flip_work != NULL; 3625 spin_unlock_irq(&dev->event_lock); 3626 3627 return pending; 3628 } 3629 3630 static void intel_update_pipe_config(struct intel_crtc *crtc, 3631 struct intel_crtc_state *old_crtc_state) 3632 { 3633 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 3634 struct intel_crtc_state *pipe_config = 3635 to_intel_crtc_state(crtc->base.state); 3636 3637 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ 3638 crtc->base.mode = crtc->base.state->mode; 3639 3640 /* 3641 * Update pipe size and adjust fitter if needed: the reason for this is 3642 * that in compute_mode_changes we check the native mode (not the pfit 3643 * mode) to see if we can flip rather than do a full mode set. In the 3644 * fastboot case, we'll flip, but if we don't update the pipesrc and 3645 * pfit state, we'll end up with a big fb scanned out into the wrong 3646 * sized surface. 
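* PIPESRC below is programmed with ((width - 1) << 16) | (height - 1).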
3647 */ 3648 3649 I915_WRITE(PIPESRC(crtc->pipe), 3650 ((pipe_config->pipe_src_w - 1) << 16) | 3651 (pipe_config->pipe_src_h - 1)); 3652 3653 /* on skylake this is done by detaching scalers */ 3654 if (INTEL_GEN(dev_priv) >= 9) { 3655 skl_detach_scalers(crtc); 3656 3657 if (pipe_config->pch_pfit.enabled) 3658 skylake_pfit_enable(crtc); 3659 } else if (HAS_PCH_SPLIT(dev_priv)) { 3660 if (pipe_config->pch_pfit.enabled) 3661 ironlake_pfit_enable(crtc); 3662 else if (old_crtc_state->pch_pfit.enabled) 3663 ironlake_pfit_disable(crtc, true); 3664 } 3665 } 3666 3667 static void intel_fdi_normal_train(struct intel_crtc *crtc) 3668 { 3669 struct drm_device *dev = crtc->base.dev; 3670 struct drm_i915_private *dev_priv = to_i915(dev); 3671 int pipe = crtc->pipe; 3672 i915_reg_t reg; 3673 u32 temp; 3674 3675 /* enable normal train */ 3676 reg = FDI_TX_CTL(pipe); 3677 temp = I915_READ(reg); 3678 if (IS_IVYBRIDGE(dev_priv)) { 3679 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3680 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3681 } else { 3682 temp &= ~FDI_LINK_TRAIN_NONE; 3683 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3684 } 3685 I915_WRITE(reg, temp); 3686 3687 reg = FDI_RX_CTL(pipe); 3688 temp = I915_READ(reg); 3689 if (HAS_PCH_CPT(dev_priv)) { 3690 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3691 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3692 } else { 3693 temp &= ~FDI_LINK_TRAIN_NONE; 3694 temp |= FDI_LINK_TRAIN_NONE; 3695 } 3696 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3697 3698 /* wait one idle pattern time */ 3699 POSTING_READ(reg); 3700 udelay(1000); 3701 3702 /* IVB wants error correction enabled */ 3703 if (IS_IVYBRIDGE(dev_priv)) 3704 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3705 FDI_FE_ERRC_ENABLE); 3706 } 3707 3708 /* The FDI link training functions for ILK/Ibexpeak. 
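 *
 * In outline: enable CPU FDI TX and PCH FDI RX with training pattern 1,
 * poll FDI_RX_IIR for bit lock, then switch both ends to pattern 2 and
 * poll for symbol lock; intel_fdi_normal_train() later moves the link to
 * the normal (idle) pattern.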
*/ 3709 static void ironlake_fdi_link_train(struct intel_crtc *crtc, 3710 const struct intel_crtc_state *crtc_state) 3711 { 3712 struct drm_device *dev = crtc->base.dev; 3713 struct drm_i915_private *dev_priv = to_i915(dev); 3714 int pipe = crtc->pipe; 3715 i915_reg_t reg; 3716 u32 temp, tries; 3717 3718 /* FDI needs bits from pipe first */ 3719 assert_pipe_enabled(dev_priv, pipe); 3720 3721 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3722 for train result */ 3723 reg = FDI_RX_IMR(pipe); 3724 temp = I915_READ(reg); 3725 temp &= ~FDI_RX_SYMBOL_LOCK; 3726 temp &= ~FDI_RX_BIT_LOCK; 3727 I915_WRITE(reg, temp); 3728 I915_READ(reg); 3729 udelay(150); 3730 3731 /* enable CPU FDI TX and PCH FDI RX */ 3732 reg = FDI_TX_CTL(pipe); 3733 temp = I915_READ(reg); 3734 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3735 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3736 temp &= ~FDI_LINK_TRAIN_NONE; 3737 temp |= FDI_LINK_TRAIN_PATTERN_1; 3738 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3739 3740 reg = FDI_RX_CTL(pipe); 3741 temp = I915_READ(reg); 3742 temp &= ~FDI_LINK_TRAIN_NONE; 3743 temp |= FDI_LINK_TRAIN_PATTERN_1; 3744 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3745 3746 POSTING_READ(reg); 3747 udelay(150); 3748 3749 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3750 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3751 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 3752 FDI_RX_PHASE_SYNC_POINTER_EN); 3753 3754 reg = FDI_RX_IIR(pipe); 3755 for (tries = 0; tries < 5; tries++) { 3756 temp = I915_READ(reg); 3757 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3758 3759 if ((temp & FDI_RX_BIT_LOCK)) { 3760 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3761 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3762 break; 3763 } 3764 } 3765 if (tries == 5) 3766 DRM_ERROR("FDI train 1 fail!\n"); 3767 3768 /* Train 2 */ 3769 reg = FDI_TX_CTL(pipe); 3770 temp = I915_READ(reg); 3771 temp &= ~FDI_LINK_TRAIN_NONE; 3772 temp |= FDI_LINK_TRAIN_PATTERN_2; 3773 I915_WRITE(reg, temp); 3774 3775 reg = FDI_RX_CTL(pipe); 3776 temp = I915_READ(reg); 3777 temp &= ~FDI_LINK_TRAIN_NONE; 3778 temp |= FDI_LINK_TRAIN_PATTERN_2; 3779 I915_WRITE(reg, temp); 3780 3781 POSTING_READ(reg); 3782 udelay(150); 3783 3784 reg = FDI_RX_IIR(pipe); 3785 for (tries = 0; tries < 5; tries++) { 3786 temp = I915_READ(reg); 3787 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3788 3789 if (temp & FDI_RX_SYMBOL_LOCK) { 3790 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3791 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3792 break; 3793 } 3794 } 3795 if (tries == 5) 3796 DRM_ERROR("FDI train 2 fail!\n"); 3797 3798 DRM_DEBUG_KMS("FDI train done\n"); 3799 3800 } 3801 3802 static const int snb_b_fdi_train_param[] = { 3803 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3804 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3805 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3806 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3807 }; 3808 3809 /* The FDI link training functions for SNB/Cougarpoint. 
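 *
 * Compared to the ILK path above, this one also walks the four
 * voltage-swing/pre-emphasis settings in snb_b_fdi_train_param[], retrying
 * the FDI_RX_IIR poll up to five times per setting until bit/symbol lock
 * is seen.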
*/ 3810 static void gen6_fdi_link_train(struct intel_crtc *crtc, 3811 const struct intel_crtc_state *crtc_state) 3812 { 3813 struct drm_device *dev = crtc->base.dev; 3814 struct drm_i915_private *dev_priv = to_i915(dev); 3815 int pipe = crtc->pipe; 3816 i915_reg_t reg; 3817 u32 temp, i, retry; 3818 3819 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3820 for train result */ 3821 reg = FDI_RX_IMR(pipe); 3822 temp = I915_READ(reg); 3823 temp &= ~FDI_RX_SYMBOL_LOCK; 3824 temp &= ~FDI_RX_BIT_LOCK; 3825 I915_WRITE(reg, temp); 3826 3827 POSTING_READ(reg); 3828 udelay(150); 3829 3830 /* enable CPU FDI TX and PCH FDI RX */ 3831 reg = FDI_TX_CTL(pipe); 3832 temp = I915_READ(reg); 3833 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3834 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3835 temp &= ~FDI_LINK_TRAIN_NONE; 3836 temp |= FDI_LINK_TRAIN_PATTERN_1; 3837 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3838 /* SNB-B */ 3839 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3840 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3841 3842 I915_WRITE(FDI_RX_MISC(pipe), 3843 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3844 3845 reg = FDI_RX_CTL(pipe); 3846 temp = I915_READ(reg); 3847 if (HAS_PCH_CPT(dev_priv)) { 3848 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3849 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3850 } else { 3851 temp &= ~FDI_LINK_TRAIN_NONE; 3852 temp |= FDI_LINK_TRAIN_PATTERN_1; 3853 } 3854 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3855 3856 POSTING_READ(reg); 3857 udelay(150); 3858 3859 for (i = 0; i < 4; i++) { 3860 reg = FDI_TX_CTL(pipe); 3861 temp = I915_READ(reg); 3862 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3863 temp |= snb_b_fdi_train_param[i]; 3864 I915_WRITE(reg, temp); 3865 3866 POSTING_READ(reg); 3867 udelay(500); 3868 3869 for (retry = 0; retry < 5; retry++) { 3870 reg = FDI_RX_IIR(pipe); 3871 temp = I915_READ(reg); 3872 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3873 if (temp & FDI_RX_BIT_LOCK) { 3874 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3875 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3876 break; 3877 } 3878 udelay(50); 3879 } 3880 if (retry < 5) 3881 break; 3882 } 3883 if (i == 4) 3884 DRM_ERROR("FDI train 1 fail!\n"); 3885 3886 /* Train 2 */ 3887 reg = FDI_TX_CTL(pipe); 3888 temp = I915_READ(reg); 3889 temp &= ~FDI_LINK_TRAIN_NONE; 3890 temp |= FDI_LINK_TRAIN_PATTERN_2; 3891 if (IS_GEN6(dev_priv)) { 3892 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3893 /* SNB-B */ 3894 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; 3895 } 3896 I915_WRITE(reg, temp); 3897 3898 reg = FDI_RX_CTL(pipe); 3899 temp = I915_READ(reg); 3900 if (HAS_PCH_CPT(dev_priv)) { 3901 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3902 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 3903 } else { 3904 temp &= ~FDI_LINK_TRAIN_NONE; 3905 temp |= FDI_LINK_TRAIN_PATTERN_2; 3906 } 3907 I915_WRITE(reg, temp); 3908 3909 POSTING_READ(reg); 3910 udelay(150); 3911 3912 for (i = 0; i < 4; i++) { 3913 reg = FDI_TX_CTL(pipe); 3914 temp = I915_READ(reg); 3915 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3916 temp |= snb_b_fdi_train_param[i]; 3917 I915_WRITE(reg, temp); 3918 3919 POSTING_READ(reg); 3920 udelay(500); 3921 3922 for (retry = 0; retry < 5; retry++) { 3923 reg = FDI_RX_IIR(pipe); 3924 temp = I915_READ(reg); 3925 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3926 if (temp & FDI_RX_SYMBOL_LOCK) { 3927 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3928 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3929 break; 3930 } 3931 udelay(50); 3932 } 3933 if (retry < 5) 3934 break; 3935 } 3936 if (i == 4) 3937 DRM_ERROR("FDI train 2 fail!\n"); 3938 3939 DRM_DEBUG_KMS("FDI train done.\n"); 3940 } 3941 3942 
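/*
 * The IVB path below reuses the SNB-B vswing/pre-emphasis table, disables
 * the hardware auto-training mode (FDI_LINK_TRAIN_AUTO) and tries each
 * table entry twice before moving on.
 */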
/* Manual link training for Ivy Bridge A0 parts */ 3943 static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, 3944 const struct intel_crtc_state *crtc_state) 3945 { 3946 struct drm_device *dev = crtc->base.dev; 3947 struct drm_i915_private *dev_priv = to_i915(dev); 3948 int pipe = crtc->pipe; 3949 i915_reg_t reg; 3950 u32 temp, i, j; 3951 3952 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3953 for train result */ 3954 reg = FDI_RX_IMR(pipe); 3955 temp = I915_READ(reg); 3956 temp &= ~FDI_RX_SYMBOL_LOCK; 3957 temp &= ~FDI_RX_BIT_LOCK; 3958 I915_WRITE(reg, temp); 3959 3960 POSTING_READ(reg); 3961 udelay(150); 3962 3963 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n", 3964 I915_READ(FDI_RX_IIR(pipe))); 3965 3966 /* Try each vswing and preemphasis setting twice before moving on */ 3967 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) { 3968 /* disable first in case we need to retry */ 3969 reg = FDI_TX_CTL(pipe); 3970 temp = I915_READ(reg); 3971 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); 3972 temp &= ~FDI_TX_ENABLE; 3973 I915_WRITE(reg, temp); 3974 3975 reg = FDI_RX_CTL(pipe); 3976 temp = I915_READ(reg); 3977 temp &= ~FDI_LINK_TRAIN_AUTO; 3978 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3979 temp &= ~FDI_RX_ENABLE; 3980 I915_WRITE(reg, temp); 3981 3982 /* enable CPU FDI TX and PCH FDI RX */ 3983 reg = FDI_TX_CTL(pipe); 3984 temp = I915_READ(reg); 3985 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3986 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes); 3987 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; 3988 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; 3989 temp |= snb_b_fdi_train_param[j/2]; 3990 temp |= FDI_COMPOSITE_SYNC; 3991 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3992 3993 I915_WRITE(FDI_RX_MISC(pipe), 3994 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 3995 3996 reg = FDI_RX_CTL(pipe); 3997 temp = I915_READ(reg); 3998 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 3999 temp |= FDI_COMPOSITE_SYNC; 4000 I915_WRITE(reg, temp | FDI_RX_ENABLE); 4001 4002 POSTING_READ(reg); 4003 udelay(1); /* should be 0.5us */ 4004 4005 for (i = 0; i < 4; i++) { 4006 reg = FDI_RX_IIR(pipe); 4007 temp = I915_READ(reg); 4008 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4009 4010 if (temp & FDI_RX_BIT_LOCK || 4011 (I915_READ(reg) & FDI_RX_BIT_LOCK)) { 4012 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 4013 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", 4014 i); 4015 break; 4016 } 4017 udelay(1); /* should be 0.5us */ 4018 } 4019 if (i == 4) { 4020 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2); 4021 continue; 4022 } 4023 4024 /* Train 2 */ 4025 reg = FDI_TX_CTL(pipe); 4026 temp = I915_READ(reg); 4027 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 4028 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; 4029 I915_WRITE(reg, temp); 4030 4031 reg = FDI_RX_CTL(pipe); 4032 temp = I915_READ(reg); 4033 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4034 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; 4035 I915_WRITE(reg, temp); 4036 4037 POSTING_READ(reg); 4038 udelay(2); /* should be 1.5us */ 4039 4040 for (i = 0; i < 4; i++) { 4041 reg = FDI_RX_IIR(pipe); 4042 temp = I915_READ(reg); 4043 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 4044 4045 if (temp & FDI_RX_SYMBOL_LOCK || 4046 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) { 4047 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 4048 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", 4049 i); 4050 goto train_done; 4051 } 4052 udelay(2); /* should be 1.5us */ 4053 } 4054 if (i == 4) 4055 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2); 4056 } 4057 4058 train_done: 4059 DRM_DEBUG_KMS("FDI train done.\n"); 
4060 } 4061 4062 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 4063 { 4064 struct drm_device *dev = intel_crtc->base.dev; 4065 struct drm_i915_private *dev_priv = to_i915(dev); 4066 int pipe = intel_crtc->pipe; 4067 i915_reg_t reg; 4068 u32 temp; 4069 4070 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 4071 reg = FDI_RX_CTL(pipe); 4072 temp = I915_READ(reg); 4073 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16)); 4074 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 4075 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4076 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); 4077 4078 POSTING_READ(reg); 4079 udelay(200); 4080 4081 /* Switch from Rawclk to PCDclk */ 4082 temp = I915_READ(reg); 4083 I915_WRITE(reg, temp | FDI_PCDCLK); 4084 4085 POSTING_READ(reg); 4086 udelay(200); 4087 4088 /* Enable CPU FDI TX PLL, always on for Ironlake */ 4089 reg = FDI_TX_CTL(pipe); 4090 temp = I915_READ(reg); 4091 if ((temp & FDI_TX_PLL_ENABLE) == 0) { 4092 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); 4093 4094 POSTING_READ(reg); 4095 udelay(100); 4096 } 4097 } 4098 4099 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 4100 { 4101 struct drm_device *dev = intel_crtc->base.dev; 4102 struct drm_i915_private *dev_priv = to_i915(dev); 4103 int pipe = intel_crtc->pipe; 4104 i915_reg_t reg; 4105 u32 temp; 4106 4107 /* Switch from PCDclk to Rawclk */ 4108 reg = FDI_RX_CTL(pipe); 4109 temp = I915_READ(reg); 4110 I915_WRITE(reg, temp & ~FDI_PCDCLK); 4111 4112 /* Disable CPU FDI TX PLL */ 4113 reg = FDI_TX_CTL(pipe); 4114 temp = I915_READ(reg); 4115 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); 4116 4117 POSTING_READ(reg); 4118 udelay(100); 4119 4120 reg = FDI_RX_CTL(pipe); 4121 temp = I915_READ(reg); 4122 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); 4123 4124 /* Wait for the clocks to turn off. 
*/ 4125 POSTING_READ(reg); 4126 udelay(100); 4127 } 4128 4129 static void ironlake_fdi_disable(struct drm_crtc *crtc) 4130 { 4131 struct drm_device *dev = crtc->dev; 4132 struct drm_i915_private *dev_priv = to_i915(dev); 4133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4134 int pipe = intel_crtc->pipe; 4135 i915_reg_t reg; 4136 u32 temp; 4137 4138 /* disable CPU FDI tx and PCH FDI rx */ 4139 reg = FDI_TX_CTL(pipe); 4140 temp = I915_READ(reg); 4141 I915_WRITE(reg, temp & ~FDI_TX_ENABLE); 4142 POSTING_READ(reg); 4143 4144 reg = FDI_RX_CTL(pipe); 4145 temp = I915_READ(reg); 4146 temp &= ~(0x7 << 16); 4147 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4148 I915_WRITE(reg, temp & ~FDI_RX_ENABLE); 4149 4150 POSTING_READ(reg); 4151 udelay(100); 4152 4153 /* Ironlake workaround, disable clock pointer after downing FDI */ 4154 if (HAS_PCH_IBX(dev_priv)) 4155 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 4156 4157 /* still set train pattern 1 */ 4158 reg = FDI_TX_CTL(pipe); 4159 temp = I915_READ(reg); 4160 temp &= ~FDI_LINK_TRAIN_NONE; 4161 temp |= FDI_LINK_TRAIN_PATTERN_1; 4162 I915_WRITE(reg, temp); 4163 4164 reg = FDI_RX_CTL(pipe); 4165 temp = I915_READ(reg); 4166 if (HAS_PCH_CPT(dev_priv)) { 4167 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 4168 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; 4169 } else { 4170 temp &= ~FDI_LINK_TRAIN_NONE; 4171 temp |= FDI_LINK_TRAIN_PATTERN_1; 4172 } 4173 /* BPC in FDI rx is consistent with that in PIPECONF */ 4174 temp &= ~(0x07 << 16); 4175 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; 4176 I915_WRITE(reg, temp); 4177 4178 POSTING_READ(reg); 4179 udelay(100); 4180 } 4181 4182 bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv) 4183 { 4184 struct intel_crtc *crtc; 4185 4186 /* Note that we don't need to be called with mode_config.lock here 4187 * as our list of CRTC objects is static for the lifetime of the 4188 * device and so cannot disappear as we iterate. Similarly, we can 4189 * happily treat the predicates as racy, atomic checks as userspace 4190 * cannot claim and pin a new fb without at least acquring the 4191 * struct_mutex and so serialising with us. 
4192 */ 4193 for_each_intel_crtc(&dev_priv->drm, crtc) { 4194 if (atomic_read(&crtc->unpin_work_count) == 0) 4195 continue; 4196 4197 if (crtc->flip_work) 4198 intel_wait_for_vblank(dev_priv, crtc->pipe); 4199 4200 return true; 4201 } 4202 4203 return false; 4204 } 4205 4206 static void page_flip_completed(struct intel_crtc *intel_crtc) 4207 { 4208 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 4209 struct intel_flip_work *work = intel_crtc->flip_work; 4210 4211 intel_crtc->flip_work = NULL; 4212 4213 if (work->event) 4214 drm_crtc_send_vblank_event(&intel_crtc->base, work->event); 4215 4216 drm_crtc_vblank_put(&intel_crtc->base); 4217 4218 wake_up_all(&dev_priv->pending_flip_queue); 4219 trace_i915_flip_complete(intel_crtc->plane, 4220 work->pending_flip_obj); 4221 4222 queue_work(dev_priv->wq, &work->unpin_work); 4223 } 4224 4225 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 4226 { 4227 struct drm_device *dev = crtc->dev; 4228 struct drm_i915_private *dev_priv = to_i915(dev); 4229 long ret; 4230 4231 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 4232 4233 ret = wait_event_interruptible_timeout( 4234 dev_priv->pending_flip_queue, 4235 !intel_crtc_has_pending_flip(crtc), 4236 60*HZ); 4237 4238 if (ret < 0) 4239 return ret; 4240 4241 if (ret == 0) { 4242 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4243 struct intel_flip_work *work; 4244 4245 spin_lock_irq(&dev->event_lock); 4246 work = intel_crtc->flip_work; 4247 if (work && !is_mmio_work(work)) { 4248 WARN_ONCE(1, "Removing stuck page flip\n"); 4249 page_flip_completed(intel_crtc); 4250 } 4251 spin_unlock_irq(&dev->event_lock); 4252 } 4253 4254 return 0; 4255 } 4256 4257 void lpt_disable_iclkip(struct drm_i915_private *dev_priv) 4258 { 4259 u32 temp; 4260 4261 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE); 4262 4263 mutex_lock(&dev_priv->sb_lock); 4264 4265 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4266 temp |= SBI_SSCCTL_DISABLE; 4267 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4268 4269 mutex_unlock(&dev_priv->sb_lock); 4270 } 4271 4272 /* Program iCLKIP clock to the desired frequency */ 4273 static void lpt_program_iclkip(struct intel_crtc *crtc) 4274 { 4275 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4276 int clock = crtc->config->base.adjusted_mode.crtc_clock; 4277 u32 divsel, phaseinc, auxdiv, phasedir = 0; 4278 u32 temp; 4279 4280 lpt_disable_iclkip(dev_priv); 4281 4282 /* The iCLK virtual clock root frequency is in MHz, 4283 * but the adjusted_mode->crtc_clock in in KHz. To get the 4284 * divisors, it is necessary to divide one by another, so we 4285 * convert the virtual clock precision to KHz here for higher 4286 * precision. 
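	 *
	 * Worked example with illustrative numbers (not taken from any
	 * particular mode): for crtc_clock = 108000 kHz and auxdiv = 0,
	 * desired_divisor = 172800000 / 108000 = 1600, so divsel =
	 * 1600 / 64 - 2 = 23 and phaseinc = 1600 % 64 = 0, which fits
	 * within the 7-bit divisor limit.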
4287 */ 4288 for (auxdiv = 0; auxdiv < 2; auxdiv++) { 4289 u32 iclk_virtual_root_freq = 172800 * 1000; 4290 u32 iclk_pi_range = 64; 4291 u32 desired_divisor; 4292 4293 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 4294 clock << auxdiv); 4295 divsel = (desired_divisor / iclk_pi_range) - 2; 4296 phaseinc = desired_divisor % iclk_pi_range; 4297 4298 /* 4299 * Near 20MHz is a corner case which is 4300 * out of range for the 7-bit divisor 4301 */ 4302 if (divsel <= 0x7f) 4303 break; 4304 } 4305 4306 /* This should not happen with any sane values */ 4307 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) & 4308 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK); 4309 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) & 4310 ~SBI_SSCDIVINTPHASE_INCVAL_MASK); 4311 4312 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n", 4313 clock, 4314 auxdiv, 4315 divsel, 4316 phasedir, 4317 phaseinc); 4318 4319 mutex_lock(&dev_priv->sb_lock); 4320 4321 /* Program SSCDIVINTPHASE6 */ 4322 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 4323 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK; 4324 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel); 4325 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK; 4326 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc); 4327 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir); 4328 temp |= SBI_SSCDIVINTPHASE_PROPAGATE; 4329 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK); 4330 4331 /* Program SSCAUXDIV */ 4332 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 4333 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1); 4334 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv); 4335 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK); 4336 4337 /* Enable modulator and associated divider */ 4338 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4339 temp &= ~SBI_SSCCTL_DISABLE; 4340 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK); 4341 4342 mutex_unlock(&dev_priv->sb_lock); 4343 4344 /* Wait for initialization time */ 4345 udelay(24); 4346 4347 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); 4348 } 4349 4350 int lpt_get_iclkip(struct drm_i915_private *dev_priv) 4351 { 4352 u32 divsel, phaseinc, auxdiv; 4353 u32 iclk_virtual_root_freq = 172800 * 1000; 4354 u32 iclk_pi_range = 64; 4355 u32 desired_divisor; 4356 u32 temp; 4357 4358 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0) 4359 return 0; 4360 4361 mutex_lock(&dev_priv->sb_lock); 4362 4363 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK); 4364 if (temp & SBI_SSCCTL_DISABLE) { 4365 mutex_unlock(&dev_priv->sb_lock); 4366 return 0; 4367 } 4368 4369 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK); 4370 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >> 4371 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT; 4372 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >> 4373 SBI_SSCDIVINTPHASE_INCVAL_SHIFT; 4374 4375 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK); 4376 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >> 4377 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT; 4378 4379 mutex_unlock(&dev_priv->sb_lock); 4380 4381 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc; 4382 4383 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq, 4384 desired_divisor << auxdiv); 4385 } 4386 4387 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4388 enum i915_pipe pch_transcoder) 4389 { 4390 struct drm_device *dev = crtc->base.dev; 4391 struct drm_i915_private *dev_priv = to_i915(dev); 4392 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4393 4394 
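	/*
	 * The PCH transcoder runs with the same timings as the CPU
	 * transcoder, so just copy the h/v timing registers across.
	 */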
I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4395 I915_READ(HTOTAL(cpu_transcoder))); 4396 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 4397 I915_READ(HBLANK(cpu_transcoder))); 4398 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 4399 I915_READ(HSYNC(cpu_transcoder))); 4400 4401 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 4402 I915_READ(VTOTAL(cpu_transcoder))); 4403 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 4404 I915_READ(VBLANK(cpu_transcoder))); 4405 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 4406 I915_READ(VSYNC(cpu_transcoder))); 4407 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 4408 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4409 } 4410 4411 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4412 { 4413 struct drm_i915_private *dev_priv = to_i915(dev); 4414 uint32_t temp; 4415 4416 temp = I915_READ(SOUTH_CHICKEN1); 4417 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 4418 return; 4419 4420 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 4421 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 4422 4423 temp &= ~FDI_BC_BIFURCATION_SELECT; 4424 if (enable) 4425 temp |= FDI_BC_BIFURCATION_SELECT; 4426 4427 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); 4428 I915_WRITE(SOUTH_CHICKEN1, temp); 4429 POSTING_READ(SOUTH_CHICKEN1); 4430 } 4431 4432 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4433 { 4434 struct drm_device *dev = intel_crtc->base.dev; 4435 4436 switch (intel_crtc->pipe) { 4437 case PIPE_A: 4438 break; 4439 case PIPE_B: 4440 if (intel_crtc->config->fdi_lanes > 2) 4441 cpt_set_fdi_bc_bifurcation(dev, false); 4442 else 4443 cpt_set_fdi_bc_bifurcation(dev, true); 4444 4445 break; 4446 case PIPE_C: 4447 cpt_set_fdi_bc_bifurcation(dev, true); 4448 4449 break; 4450 default: 4451 BUG(); 4452 } 4453 } 4454 4455 /* Return which DP Port should be selected for Transcoder DP control */ 4456 static enum port 4457 intel_trans_dp_port_sel(struct intel_crtc *crtc) 4458 { 4459 struct drm_device *dev = crtc->base.dev; 4460 struct intel_encoder *encoder; 4461 4462 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 4463 if (encoder->type == INTEL_OUTPUT_DP || 4464 encoder->type == INTEL_OUTPUT_EDP) 4465 return enc_to_dig_port(&encoder->base)->port; 4466 } 4467 4468 return -1; 4469 } 4470 4471 /* 4472 * Enable PCH resources required for PCH ports: 4473 * - PCH PLLs 4474 * - FDI training & RX/TX 4475 * - update transcoder timings 4476 * - DP transcoding bits 4477 * - transcoder 4478 */ 4479 static void ironlake_pch_enable(const struct intel_crtc_state *crtc_state) 4480 { 4481 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4482 struct drm_device *dev = crtc->base.dev; 4483 struct drm_i915_private *dev_priv = to_i915(dev); 4484 int pipe = crtc->pipe; 4485 u32 temp; 4486 4487 assert_pch_transcoder_disabled(dev_priv, pipe); 4488 4489 if (IS_IVYBRIDGE(dev_priv)) 4490 ivybridge_update_fdi_bc_bifurcation(crtc); 4491 4492 /* Write the TU size bits before fdi link training, so that error 4493 * detection works. */ 4494 I915_WRITE(FDI_RX_TUSIZE1(pipe), 4495 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4496 4497 /* For PCH output, training FDI link */ 4498 dev_priv->display.fdi_link_train(crtc, crtc_state); 4499 4500 /* We need to program the right clock selection before writing the pixel 4501 * mutliplier into the DPLL. 
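	 * On CPT/PPT the transcoder-to-DPLL routing is chosen per pipe via
	 * PCH_DPLL_SEL, which is what the block below sets up.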
*/ 4502 if (HAS_PCH_CPT(dev_priv)) { 4503 u32 sel; 4504 4505 temp = I915_READ(PCH_DPLL_SEL); 4506 temp |= TRANS_DPLL_ENABLE(pipe); 4507 sel = TRANS_DPLLB_SEL(pipe); 4508 if (crtc_state->shared_dpll == 4509 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 4510 temp |= sel; 4511 else 4512 temp &= ~sel; 4513 I915_WRITE(PCH_DPLL_SEL, temp); 4514 } 4515 4516 /* XXX: pch pll's can be enabled any time before we enable the PCH 4517 * transcoder, and we actually should do this to not upset any PCH 4518 * transcoder that already use the clock when we share it. 4519 * 4520 * Note that enable_shared_dpll tries to do the right thing, but 4521 * get_shared_dpll unconditionally resets the pll - we need that to have 4522 * the right LVDS enable sequence. */ 4523 intel_enable_shared_dpll(crtc); 4524 4525 /* set transcoder timing, panel must allow it */ 4526 assert_panel_unlocked(dev_priv, pipe); 4527 ironlake_pch_transcoder_set_timings(crtc, pipe); 4528 4529 intel_fdi_normal_train(crtc); 4530 4531 /* For PCH DP, enable TRANS_DP_CTL */ 4532 if (HAS_PCH_CPT(dev_priv) && 4533 intel_crtc_has_dp_encoder(crtc_state)) { 4534 const struct drm_display_mode *adjusted_mode = 4535 &crtc_state->base.adjusted_mode; 4536 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4537 i915_reg_t reg = TRANS_DP_CTL(pipe); 4538 temp = I915_READ(reg); 4539 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4540 TRANS_DP_SYNC_MASK | 4541 TRANS_DP_BPC_MASK); 4542 temp |= TRANS_DP_OUTPUT_ENABLE; 4543 temp |= bpc << 9; /* same format but at 11:9 */ 4544 4545 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 4546 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4547 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 4548 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4549 4550 switch (intel_trans_dp_port_sel(crtc)) { 4551 case PORT_B: 4552 temp |= TRANS_DP_PORT_SEL_B; 4553 break; 4554 case PORT_C: 4555 temp |= TRANS_DP_PORT_SEL_C; 4556 break; 4557 case PORT_D: 4558 temp |= TRANS_DP_PORT_SEL_D; 4559 break; 4560 default: 4561 BUG(); 4562 } 4563 4564 I915_WRITE(reg, temp); 4565 } 4566 4567 ironlake_enable_pch_transcoder(dev_priv, pipe); 4568 } 4569 4570 static void lpt_pch_enable(const struct intel_crtc_state *crtc_state) 4571 { 4572 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 4573 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 4574 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; 4575 4576 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A); 4577 4578 lpt_program_iclkip(crtc); 4579 4580 /* Set transcoder timing. 
 */
	ironlake_pch_transcoder_set_timings(crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}

static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned int scaler_user, int *scaler_id,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/*
	 * Src coordinates are already rotated by 270 degrees for
	 * the 90/270 degree plane rotation cases (to match the
	 * GTT mapping), hence no need to account for rotation here.
	 */
	need_scaling = src_w != dst_w || src_h != dst_h;

	/*
	 * If the plane is being disabled, the scaler is no longer required,
	 * or a forced detach was requested:
	 * - free the scaler bound to this plane/crtc
	 * - to do this, clear the plane's bit in scaler_state->scaler_users
	 *
	 * Here the scaler state in crtc_state is only marked free so that
	 * the scaler can be assigned to another user. The actual register
	 * update that frees the scaler is done in the plane/panel-fit
	 * programming. For that reason crtc/plane_state->scaler_id isn't
	 * reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
	    dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

	    src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
	    dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4666 * 4667 * @state: crtc's scaler state 4668 * 4669 * Return 4670 * 0 - scaler_usage updated successfully 4671 * error - requested scaling cannot be supported or other error condition 4672 */ 4673 int skl_update_scaler_crtc(struct intel_crtc_state *state) 4674 { 4675 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4676 4677 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4678 &state->scaler_state.scaler_id, 4679 state->pipe_src_w, state->pipe_src_h, 4680 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4681 } 4682 4683 /** 4684 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4685 * 4686 * @state: crtc's scaler state 4687 * @plane_state: atomic plane state to update 4688 * 4689 * Return 4690 * 0 - scaler_usage updated successfully 4691 * error - requested scaling cannot be supported or other error condition 4692 */ 4693 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4694 struct intel_plane_state *plane_state) 4695 { 4696 4697 struct intel_plane *intel_plane = 4698 to_intel_plane(plane_state->base.plane); 4699 struct drm_framebuffer *fb = plane_state->base.fb; 4700 int ret; 4701 4702 bool force_detach = !fb || !plane_state->base.visible; 4703 4704 ret = skl_update_scaler(crtc_state, force_detach, 4705 drm_plane_index(&intel_plane->base), 4706 &plane_state->scaler_id, 4707 drm_rect_width(&plane_state->base.src) >> 16, 4708 drm_rect_height(&plane_state->base.src) >> 16, 4709 drm_rect_width(&plane_state->base.dst), 4710 drm_rect_height(&plane_state->base.dst)); 4711 4712 if (ret || plane_state->scaler_id < 0) 4713 return ret; 4714 4715 /* check colorkey */ 4716 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4717 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed", 4718 intel_plane->base.base.id, 4719 intel_plane->base.name); 4720 return -EINVAL; 4721 } 4722 4723 /* Check src format */ 4724 switch (fb->format->format) { 4725 case DRM_FORMAT_RGB565: 4726 case DRM_FORMAT_XBGR8888: 4727 case DRM_FORMAT_XRGB8888: 4728 case DRM_FORMAT_ABGR8888: 4729 case DRM_FORMAT_ARGB8888: 4730 case DRM_FORMAT_XRGB2101010: 4731 case DRM_FORMAT_XBGR2101010: 4732 case DRM_FORMAT_YUYV: 4733 case DRM_FORMAT_YVYU: 4734 case DRM_FORMAT_UYVY: 4735 case DRM_FORMAT_VYUY: 4736 break; 4737 default: 4738 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", 4739 intel_plane->base.base.id, intel_plane->base.name, 4740 fb->base.id, fb->format->format); 4741 return -EINVAL; 4742 } 4743 4744 return 0; 4745 } 4746 4747 static void skylake_scaler_disable(struct intel_crtc *crtc) 4748 { 4749 int i; 4750 4751 for (i = 0; i < crtc->num_scalers; i++) 4752 skl_detach_scaler(crtc, i); 4753 } 4754 4755 static void skylake_pfit_enable(struct intel_crtc *crtc) 4756 { 4757 struct drm_device *dev = crtc->base.dev; 4758 struct drm_i915_private *dev_priv = to_i915(dev); 4759 int pipe = crtc->pipe; 4760 struct intel_crtc_scaler_state *scaler_state = 4761 &crtc->config->scaler_state; 4762 4763 if (crtc->config->pch_pfit.enabled) { 4764 int id; 4765 4766 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) 4767 return; 4768 4769 id = scaler_state->scaler_id; 4770 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4771 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4772 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4773 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4774 } 4775 } 4776 4777 static void ironlake_pfit_enable(struct intel_crtc *crtc) 4778 { 
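	/*
	 * Enable the ILK-style panel fitter for this pipe; on IVB/HSW the
	 * fitter additionally has to be associated with the pipe explicitly
	 * via PF_PIPE_SEL_IVB().
	 */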
4779 struct drm_device *dev = crtc->base.dev; 4780 struct drm_i915_private *dev_priv = to_i915(dev); 4781 int pipe = crtc->pipe; 4782 4783 if (crtc->config->pch_pfit.enabled) { 4784 /* Force use of hard-coded filter coefficients 4785 * as some pre-programmed values are broken, 4786 * e.g. x201. 4787 */ 4788 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) 4789 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4790 PF_PIPE_SEL_IVB(pipe)); 4791 else 4792 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4793 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4794 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4795 } 4796 } 4797 4798 void hsw_enable_ips(struct intel_crtc *crtc) 4799 { 4800 struct drm_device *dev = crtc->base.dev; 4801 struct drm_i915_private *dev_priv = to_i915(dev); 4802 4803 if (!crtc->config->ips_enabled) 4804 return; 4805 4806 /* 4807 * We can only enable IPS after we enable a plane and wait for a vblank 4808 * This function is called from post_plane_update, which is run after 4809 * a vblank wait. 4810 */ 4811 4812 assert_plane_enabled(dev_priv, crtc->plane); 4813 if (IS_BROADWELL(dev_priv)) { 4814 mutex_lock(&dev_priv->rps.hw_lock); 4815 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4816 mutex_unlock(&dev_priv->rps.hw_lock); 4817 /* Quoting Art Runyan: "its not safe to expect any particular 4818 * value in IPS_CTL bit 31 after enabling IPS through the 4819 * mailbox." Moreover, the mailbox may return a bogus state, 4820 * so we need to just enable it and continue on. 4821 */ 4822 } else { 4823 I915_WRITE(IPS_CTL, IPS_ENABLE); 4824 /* The bit only becomes 1 in the next vblank, so this wait here 4825 * is essentially intel_wait_for_vblank. If we don't have this 4826 * and don't wait for vblanks until the end of crtc_enable, then 4827 * the HW state readout code will complain that the expected 4828 * IPS_CTL value is not the one we read. */ 4829 if (intel_wait_for_register(dev_priv, 4830 IPS_CTL, IPS_ENABLE, IPS_ENABLE, 4831 50)) 4832 DRM_ERROR("Timed out waiting for IPS enable\n"); 4833 } 4834 } 4835 4836 void hsw_disable_ips(struct intel_crtc *crtc) 4837 { 4838 struct drm_device *dev = crtc->base.dev; 4839 struct drm_i915_private *dev_priv = to_i915(dev); 4840 4841 if (!crtc->config->ips_enabled) 4842 return; 4843 4844 assert_plane_enabled(dev_priv, crtc->plane); 4845 if (IS_BROADWELL(dev_priv)) { 4846 mutex_lock(&dev_priv->rps.hw_lock); 4847 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4848 mutex_unlock(&dev_priv->rps.hw_lock); 4849 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4850 if (intel_wait_for_register(dev_priv, 4851 IPS_CTL, IPS_ENABLE, 0, 4852 42)) 4853 DRM_ERROR("Timed out waiting for IPS disable\n"); 4854 } else { 4855 I915_WRITE(IPS_CTL, 0); 4856 POSTING_READ(IPS_CTL); 4857 } 4858 4859 /* We need to wait for a vblank before we can disable the plane. */ 4860 intel_wait_for_vblank(dev_priv, crtc->pipe); 4861 } 4862 4863 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4864 { 4865 if (intel_crtc->overlay) { 4866 struct drm_device *dev = intel_crtc->base.dev; 4867 struct drm_i915_private *dev_priv = to_i915(dev); 4868 4869 mutex_lock(&dev->struct_mutex); 4870 dev_priv->mm.interruptible = false; 4871 (void) intel_overlay_switch_off(intel_crtc->overlay); 4872 dev_priv->mm.interruptible = true; 4873 mutex_unlock(&dev->struct_mutex); 4874 } 4875 4876 /* Let userspace switch the overlay on again. 
In most cases userspace 4877 * has to recompute where to put it anyway. 4878 */ 4879 } 4880 4881 /** 4882 * intel_post_enable_primary - Perform operations after enabling primary plane 4883 * @crtc: the CRTC whose primary plane was just enabled 4884 * 4885 * Performs potentially sleeping operations that must be done after the primary 4886 * plane is enabled, such as updating FBC and IPS. Note that this may be 4887 * called due to an explicit primary plane update, or due to an implicit 4888 * re-enable that is caused when a sprite plane is updated to no longer 4889 * completely hide the primary plane. 4890 */ 4891 static void 4892 intel_post_enable_primary(struct drm_crtc *crtc) 4893 { 4894 struct drm_device *dev = crtc->dev; 4895 struct drm_i915_private *dev_priv = to_i915(dev); 4896 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4897 int pipe = intel_crtc->pipe; 4898 4899 /* 4900 * FIXME IPS should be fine as long as one plane is 4901 * enabled, but in practice it seems to have problems 4902 * when going from primary only to sprite only and vice 4903 * versa. 4904 */ 4905 hsw_enable_ips(intel_crtc); 4906 4907 /* 4908 * Gen2 reports pipe underruns whenever all planes are disabled. 4909 * So don't enable underrun reporting before at least some planes 4910 * are enabled. 4911 * FIXME: Need to fix the logic to work when we turn off all planes 4912 * but leave the pipe running. 4913 */ 4914 if (IS_GEN2(dev_priv)) 4915 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4916 4917 /* Underruns don't always raise interrupts, so check manually. */ 4918 intel_check_cpu_fifo_underruns(dev_priv); 4919 intel_check_pch_fifo_underruns(dev_priv); 4920 } 4921 4922 /* FIXME move all this to pre_plane_update() with proper state tracking */ 4923 static void 4924 intel_pre_disable_primary(struct drm_crtc *crtc) 4925 { 4926 struct drm_device *dev = crtc->dev; 4927 struct drm_i915_private *dev_priv = to_i915(dev); 4928 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4929 int pipe = intel_crtc->pipe; 4930 4931 /* 4932 * Gen2 reports pipe underruns whenever all planes are disabled. 4933 * So diasble underrun reporting before all the planes get disabled. 4934 * FIXME: Need to fix the logic to work when we turn off all planes 4935 * but leave the pipe running. 4936 */ 4937 if (IS_GEN2(dev_priv)) 4938 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 4939 4940 /* 4941 * FIXME IPS should be fine as long as one plane is 4942 * enabled, but in practice it seems to have problems 4943 * when going from primary only to sprite only and vice 4944 * versa. 4945 */ 4946 hsw_disable_ips(intel_crtc); 4947 } 4948 4949 /* FIXME get rid of this and use pre_plane_update */ 4950 static void 4951 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 4952 { 4953 struct drm_device *dev = crtc->dev; 4954 struct drm_i915_private *dev_priv = to_i915(dev); 4955 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4956 int pipe = intel_crtc->pipe; 4957 4958 intel_pre_disable_primary(crtc); 4959 4960 /* 4961 * Vblank time updates from the shadow to live plane control register 4962 * are blocked if the memory self-refresh mode is active at that 4963 * moment. So to make sure the plane gets truly disabled, disable 4964 * first the self-refresh mode. The self-refresh enable bit in turn 4965 * will be checked/applied by the HW only at the next frame start 4966 * event which is after the vblank start event, so we need to have a 4967 * wait-for-vblank between disabling the plane and the pipe. 
4968 */ 4969 if (HAS_GMCH_DISPLAY(dev_priv) && 4970 intel_set_memory_cxsr(dev_priv, false)) 4971 intel_wait_for_vblank(dev_priv, pipe); 4972 } 4973 4974 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state) 4975 { 4976 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4977 struct drm_atomic_state *old_state = old_crtc_state->base.state; 4978 struct intel_crtc_state *pipe_config = 4979 to_intel_crtc_state(crtc->base.state); 4980 struct drm_plane *primary = crtc->base.primary; 4981 struct drm_plane_state *old_pri_state = 4982 drm_atomic_get_existing_plane_state(old_state, primary); 4983 4984 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits); 4985 4986 if (pipe_config->update_wm_post && pipe_config->base.active) 4987 intel_update_watermarks(crtc); 4988 4989 if (old_pri_state) { 4990 struct intel_plane_state *primary_state = 4991 to_intel_plane_state(primary->state); 4992 struct intel_plane_state *old_primary_state = 4993 to_intel_plane_state(old_pri_state); 4994 4995 intel_fbc_post_update(crtc); 4996 4997 if (primary_state->base.visible && 4998 (needs_modeset(&pipe_config->base) || 4999 !old_primary_state->base.visible)) 5000 intel_post_enable_primary(&crtc->base); 5001 } 5002 } 5003 5004 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state, 5005 struct intel_crtc_state *pipe_config) 5006 { 5007 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 5008 struct drm_device *dev = crtc->base.dev; 5009 struct drm_i915_private *dev_priv = to_i915(dev); 5010 struct drm_atomic_state *old_state = old_crtc_state->base.state; 5011 struct drm_plane *primary = crtc->base.primary; 5012 struct drm_plane_state *old_pri_state = 5013 drm_atomic_get_existing_plane_state(old_state, primary); 5014 bool modeset = needs_modeset(&pipe_config->base); 5015 struct intel_atomic_state *old_intel_state = 5016 to_intel_atomic_state(old_state); 5017 5018 if (old_pri_state) { 5019 struct intel_plane_state *primary_state = 5020 to_intel_plane_state(primary->state); 5021 struct intel_plane_state *old_primary_state = 5022 to_intel_plane_state(old_pri_state); 5023 5024 intel_fbc_pre_update(crtc, pipe_config, primary_state); 5025 5026 if (old_primary_state->base.visible && 5027 (modeset || !primary_state->base.visible)) 5028 intel_pre_disable_primary(&crtc->base); 5029 } 5030 5031 /* 5032 * Vblank time updates from the shadow to live plane control register 5033 * are blocked if the memory self-refresh mode is active at that 5034 * moment. So to make sure the plane gets truly disabled, disable 5035 * first the self-refresh mode. The self-refresh enable bit in turn 5036 * will be checked/applied by the HW only at the next frame start 5037 * event which is after the vblank start event, so we need to have a 5038 * wait-for-vblank between disabling the plane and the pipe. 5039 */ 5040 if (HAS_GMCH_DISPLAY(dev_priv) && old_crtc_state->base.active && 5041 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false)) 5042 intel_wait_for_vblank(dev_priv, crtc->pipe); 5043 5044 /* 5045 * IVB workaround: must disable low power watermarks for at least 5046 * one frame before enabling scaling. LP watermarks can be re-enabled 5047 * when scaling is disabled. 5048 * 5049 * WaCxSRDisabledForSpriteScaling:ivb 5050 */ 5051 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev)) 5052 intel_wait_for_vblank(dev_priv, crtc->pipe); 5053 5054 /* 5055 * If we're doing a modeset, we're done. 
No need to do any pre-vblank 5056 * watermark programming here. 5057 */ 5058 if (needs_modeset(&pipe_config->base)) 5059 return; 5060 5061 /* 5062 * For platforms that support atomic watermarks, program the 5063 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these 5064 * will be the intermediate values that are safe for both pre- and 5065 * post- vblank; when vblank happens, the 'active' values will be set 5066 * to the final 'target' values and we'll do this again to get the 5067 * optimal watermarks. For gen9+ platforms, the values we program here 5068 * will be the final target values which will get automatically latched 5069 * at vblank time; no further programming will be necessary. 5070 * 5071 * If a platform hasn't been transitioned to atomic watermarks yet, 5072 * we'll continue to update watermarks the old way, if flags tell 5073 * us to. 5074 */ 5075 if (dev_priv->display.initial_watermarks != NULL) 5076 dev_priv->display.initial_watermarks(old_intel_state, 5077 pipe_config); 5078 else if (pipe_config->update_wm_pre) 5079 intel_update_watermarks(crtc); 5080 } 5081 5082 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask) 5083 { 5084 struct drm_device *dev = crtc->dev; 5085 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5086 struct drm_plane *p; 5087 int pipe = intel_crtc->pipe; 5088 5089 intel_crtc_dpms_overlay_disable(intel_crtc); 5090 5091 drm_for_each_plane_mask(p, dev, plane_mask) 5092 to_intel_plane(p)->disable_plane(p, crtc); 5093 5094 /* 5095 * FIXME: Once we grow proper nuclear flip support out of this we need 5096 * to compute the mask of flip planes precisely. For the time being 5097 * consider this a flip to a NULL plane. 5098 */ 5099 intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe)); 5100 } 5101 5102 static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc, 5103 struct intel_crtc_state *crtc_state, 5104 struct drm_atomic_state *old_state) 5105 { 5106 struct drm_connector_state *conn_state; 5107 struct drm_connector *conn; 5108 int i; 5109 5110 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5111 struct intel_encoder *encoder = 5112 to_intel_encoder(conn_state->best_encoder); 5113 5114 if (conn_state->crtc != crtc) 5115 continue; 5116 5117 if (encoder->pre_pll_enable) 5118 encoder->pre_pll_enable(encoder, crtc_state, conn_state); 5119 } 5120 } 5121 5122 static void intel_encoders_pre_enable(struct drm_crtc *crtc, 5123 struct intel_crtc_state *crtc_state, 5124 struct drm_atomic_state *old_state) 5125 { 5126 struct drm_connector_state *conn_state; 5127 struct drm_connector *conn; 5128 int i; 5129 5130 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5131 struct intel_encoder *encoder = 5132 to_intel_encoder(conn_state->best_encoder); 5133 5134 if (conn_state->crtc != crtc) 5135 continue; 5136 5137 if (encoder->pre_enable) 5138 encoder->pre_enable(encoder, crtc_state, conn_state); 5139 } 5140 } 5141 5142 static void intel_encoders_enable(struct drm_crtc *crtc, 5143 struct intel_crtc_state *crtc_state, 5144 struct drm_atomic_state *old_state) 5145 { 5146 struct drm_connector_state *conn_state; 5147 struct drm_connector *conn; 5148 int i; 5149 5150 for_each_new_connector_in_state(old_state, conn, conn_state, i) { 5151 struct intel_encoder *encoder = 5152 to_intel_encoder(conn_state->best_encoder); 5153 5154 if (conn_state->crtc != crtc) 5155 continue; 5156 5157 encoder->enable(encoder, crtc_state, conn_state); 5158 intel_opregion_notify_encoder(encoder, true); 
5159 } 5160 } 5161 5162 static void intel_encoders_disable(struct drm_crtc *crtc, 5163 struct intel_crtc_state *old_crtc_state, 5164 struct drm_atomic_state *old_state) 5165 { 5166 struct drm_connector_state *old_conn_state; 5167 struct drm_connector *conn; 5168 int i; 5169 5170 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5171 struct intel_encoder *encoder = 5172 to_intel_encoder(old_conn_state->best_encoder); 5173 5174 if (old_conn_state->crtc != crtc) 5175 continue; 5176 5177 intel_opregion_notify_encoder(encoder, false); 5178 encoder->disable(encoder, old_crtc_state, old_conn_state); 5179 } 5180 } 5181 5182 static void intel_encoders_post_disable(struct drm_crtc *crtc, 5183 struct intel_crtc_state *old_crtc_state, 5184 struct drm_atomic_state *old_state) 5185 { 5186 struct drm_connector_state *old_conn_state; 5187 struct drm_connector *conn; 5188 int i; 5189 5190 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5191 struct intel_encoder *encoder = 5192 to_intel_encoder(old_conn_state->best_encoder); 5193 5194 if (old_conn_state->crtc != crtc) 5195 continue; 5196 5197 if (encoder->post_disable) 5198 encoder->post_disable(encoder, old_crtc_state, old_conn_state); 5199 } 5200 } 5201 5202 static void intel_encoders_post_pll_disable(struct drm_crtc *crtc, 5203 struct intel_crtc_state *old_crtc_state, 5204 struct drm_atomic_state *old_state) 5205 { 5206 struct drm_connector_state *old_conn_state; 5207 struct drm_connector *conn; 5208 int i; 5209 5210 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) { 5211 struct intel_encoder *encoder = 5212 to_intel_encoder(old_conn_state->best_encoder); 5213 5214 if (old_conn_state->crtc != crtc) 5215 continue; 5216 5217 if (encoder->post_pll_disable) 5218 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state); 5219 } 5220 } 5221 5222 static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, 5223 struct drm_atomic_state *old_state) 5224 { 5225 struct drm_crtc *crtc = pipe_config->base.crtc; 5226 struct drm_device *dev = crtc->dev; 5227 struct drm_i915_private *dev_priv = to_i915(dev); 5228 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5229 int pipe = intel_crtc->pipe; 5230 struct intel_atomic_state *old_intel_state = 5231 to_intel_atomic_state(old_state); 5232 5233 if (WARN_ON(intel_crtc->active)) 5234 return; 5235 5236 /* 5237 * Sometimes spurious CPU pipe underruns happen during FDI 5238 * training, at least with VGA+HDMI cloning. Suppress them. 5239 * 5240 * On ILK we get an occasional spurious CPU pipe underruns 5241 * between eDP port A enable and vdd enable. Also PCH port 5242 * enable seems to result in the occasional CPU pipe underrun. 5243 * 5244 * Spurious PCH underruns also occur during PCH enabling. 
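	 *
	 * Reporting is therefore suppressed here and re-enabled at the end
	 * of this function, once the pipe is up and a vblank has passed.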
5245 */ 5246 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) 5247 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5248 if (intel_crtc->config->has_pch_encoder) 5249 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5250 5251 if (intel_crtc->config->has_pch_encoder) 5252 intel_prepare_shared_dpll(intel_crtc); 5253 5254 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5255 intel_dp_set_m_n(intel_crtc, M1_N1); 5256 5257 intel_set_pipe_timings(intel_crtc); 5258 intel_set_pipe_src_size(intel_crtc); 5259 5260 if (intel_crtc->config->has_pch_encoder) { 5261 intel_cpu_transcoder_set_m_n(intel_crtc, 5262 &intel_crtc->config->fdi_m_n, NULL); 5263 } 5264 5265 ironlake_set_pipeconf(crtc); 5266 5267 intel_crtc->active = true; 5268 5269 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5270 5271 if (intel_crtc->config->has_pch_encoder) { 5272 /* Note: FDI PLL enabling _must_ be done before we enable the 5273 * cpu pipes, hence this is separate from all the other fdi/pch 5274 * enabling. */ 5275 ironlake_fdi_pll_enable(intel_crtc); 5276 } else { 5277 assert_fdi_tx_disabled(dev_priv, pipe); 5278 assert_fdi_rx_disabled(dev_priv, pipe); 5279 } 5280 5281 ironlake_pfit_enable(intel_crtc); 5282 5283 /* 5284 * On ILK+ LUT must be loaded before the pipe is running but with 5285 * clocks enabled 5286 */ 5287 intel_color_load_luts(&pipe_config->base); 5288 5289 if (dev_priv->display.initial_watermarks != NULL) 5290 dev_priv->display.initial_watermarks(old_intel_state, intel_crtc->config); 5291 intel_enable_pipe(intel_crtc); 5292 5293 if (intel_crtc->config->has_pch_encoder) 5294 ironlake_pch_enable(pipe_config); 5295 5296 assert_vblank_disabled(crtc); 5297 drm_crtc_vblank_on(crtc); 5298 5299 intel_encoders_enable(crtc, pipe_config, old_state); 5300 5301 if (HAS_PCH_CPT(dev_priv)) 5302 cpt_verify_modeset(dev, intel_crtc->pipe); 5303 5304 /* Must wait for vblank to avoid spurious PCH FIFO underruns */ 5305 if (intel_crtc->config->has_pch_encoder) 5306 intel_wait_for_vblank(dev_priv, pipe); 5307 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5308 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 5309 } 5310 5311 /* IPS only exists on ULT machines and is tied to pipe A. 
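 * (HAS_IPS() is expected to cover Haswell ULT and Broadwell here; the
 * pipe A restriction is what hsw_crtc_supports_ips() checks below.)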
*/ 5312 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 5313 { 5314 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A; 5315 } 5316 5317 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 5318 struct drm_atomic_state *old_state) 5319 { 5320 struct drm_crtc *crtc = pipe_config->base.crtc; 5321 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5322 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5323 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 5324 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5325 struct intel_atomic_state *old_intel_state = 5326 to_intel_atomic_state(old_state); 5327 5328 if (WARN_ON(intel_crtc->active)) 5329 return; 5330 5331 if (intel_crtc->config->has_pch_encoder) 5332 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5333 false); 5334 5335 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5336 5337 if (intel_crtc->config->shared_dpll) 5338 intel_enable_shared_dpll(intel_crtc); 5339 5340 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5341 intel_dp_set_m_n(intel_crtc, M1_N1); 5342 5343 if (!transcoder_is_dsi(cpu_transcoder)) 5344 intel_set_pipe_timings(intel_crtc); 5345 5346 intel_set_pipe_src_size(intel_crtc); 5347 5348 if (cpu_transcoder != TRANSCODER_EDP && 5349 !transcoder_is_dsi(cpu_transcoder)) { 5350 I915_WRITE(PIPE_MULT(cpu_transcoder), 5351 intel_crtc->config->pixel_multiplier - 1); 5352 } 5353 5354 if (intel_crtc->config->has_pch_encoder) { 5355 intel_cpu_transcoder_set_m_n(intel_crtc, 5356 &intel_crtc->config->fdi_m_n, NULL); 5357 } 5358 5359 if (!transcoder_is_dsi(cpu_transcoder)) 5360 haswell_set_pipeconf(crtc); 5361 5362 haswell_set_pipemisc(crtc); 5363 5364 intel_color_set_csc(&pipe_config->base); 5365 5366 intel_crtc->active = true; 5367 5368 if (intel_crtc->config->has_pch_encoder) 5369 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5370 else 5371 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5372 5373 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5374 5375 if (intel_crtc->config->has_pch_encoder) 5376 dev_priv->display.fdi_link_train(intel_crtc, pipe_config); 5377 5378 if (!transcoder_is_dsi(cpu_transcoder)) 5379 intel_ddi_enable_pipe_clock(pipe_config); 5380 5381 if (INTEL_GEN(dev_priv) >= 9) 5382 skylake_pfit_enable(intel_crtc); 5383 else 5384 ironlake_pfit_enable(intel_crtc); 5385 5386 /* 5387 * On ILK+ LUT must be loaded before the pipe is running but with 5388 * clocks enabled 5389 */ 5390 intel_color_load_luts(&pipe_config->base); 5391 5392 intel_ddi_set_pipe_settings(pipe_config); 5393 if (!transcoder_is_dsi(cpu_transcoder)) 5394 intel_ddi_enable_transcoder_func(pipe_config); 5395 5396 if (dev_priv->display.initial_watermarks != NULL) 5397 dev_priv->display.initial_watermarks(old_intel_state, pipe_config); 5398 5399 /* XXX: Do the pipe assertions at the right place for BXT DSI. 
*/ 5400 if (!transcoder_is_dsi(cpu_transcoder)) 5401 intel_enable_pipe(intel_crtc); 5402 5403 if (intel_crtc->config->has_pch_encoder) 5404 lpt_pch_enable(pipe_config); 5405 5406 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5407 intel_ddi_set_vc_payload_alloc(pipe_config, true); 5408 5409 assert_vblank_disabled(crtc); 5410 drm_crtc_vblank_on(crtc); 5411 5412 intel_encoders_enable(crtc, pipe_config, old_state); 5413 5414 if (intel_crtc->config->has_pch_encoder) { 5415 intel_wait_for_vblank(dev_priv, pipe); 5416 intel_wait_for_vblank(dev_priv, pipe); 5417 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5418 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5419 true); 5420 } 5421 5422 /* If we change the relative order between pipe/planes enabling, we need 5423 * to change the workaround. */ 5424 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe; 5425 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) { 5426 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 5427 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe); 5428 } 5429 } 5430 5431 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 5432 { 5433 struct drm_device *dev = crtc->base.dev; 5434 struct drm_i915_private *dev_priv = to_i915(dev); 5435 int pipe = crtc->pipe; 5436 5437 /* To avoid upsetting the power well on haswell only disable the pfit if 5438 * it's in use. The hw state code will make sure we get this right. */ 5439 if (force || crtc->config->pch_pfit.enabled) { 5440 I915_WRITE(PF_CTL(pipe), 0); 5441 I915_WRITE(PF_WIN_POS(pipe), 0); 5442 I915_WRITE(PF_WIN_SZ(pipe), 0); 5443 } 5444 } 5445 5446 static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, 5447 struct drm_atomic_state *old_state) 5448 { 5449 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5450 struct drm_device *dev = crtc->dev; 5451 struct drm_i915_private *dev_priv = to_i915(dev); 5452 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5453 int pipe = intel_crtc->pipe; 5454 5455 /* 5456 * Sometimes spurious CPU pipe underruns happen when the 5457 * pipe is already disabled, but FDI RX/TX is still enabled. 5458 * Happens at least with VGA+HDMI cloning. Suppress them. 
5459 */ 5460 if (intel_crtc->config->has_pch_encoder) { 5461 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5462 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5463 } 5464 5465 intel_encoders_disable(crtc, old_crtc_state, old_state); 5466 5467 drm_crtc_vblank_off(crtc); 5468 assert_vblank_disabled(crtc); 5469 5470 intel_disable_pipe(intel_crtc); 5471 5472 ironlake_pfit_disable(intel_crtc, false); 5473 5474 if (intel_crtc->config->has_pch_encoder) 5475 ironlake_fdi_disable(crtc); 5476 5477 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5478 5479 if (intel_crtc->config->has_pch_encoder) { 5480 ironlake_disable_pch_transcoder(dev_priv, pipe); 5481 5482 if (HAS_PCH_CPT(dev_priv)) { 5483 i915_reg_t reg; 5484 u32 temp; 5485 5486 /* disable TRANS_DP_CTL */ 5487 reg = TRANS_DP_CTL(pipe); 5488 temp = I915_READ(reg); 5489 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 5490 TRANS_DP_PORT_SEL_MASK); 5491 temp |= TRANS_DP_PORT_SEL_NONE; 5492 I915_WRITE(reg, temp); 5493 5494 /* disable DPLL_SEL */ 5495 temp = I915_READ(PCH_DPLL_SEL); 5496 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 5497 I915_WRITE(PCH_DPLL_SEL, temp); 5498 } 5499 5500 ironlake_fdi_pll_disable(intel_crtc); 5501 } 5502 5503 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5504 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 5505 } 5506 5507 static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, 5508 struct drm_atomic_state *old_state) 5509 { 5510 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5511 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5513 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5514 5515 if (intel_crtc->config->has_pch_encoder) 5516 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5517 false); 5518 5519 intel_encoders_disable(crtc, old_crtc_state, old_state); 5520 5521 drm_crtc_vblank_off(crtc); 5522 assert_vblank_disabled(crtc); 5523 5524 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5525 if (!transcoder_is_dsi(cpu_transcoder)) 5526 intel_disable_pipe(intel_crtc); 5527 5528 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DP_MST)) 5529 intel_ddi_set_vc_payload_alloc(intel_crtc->config, false); 5530 5531 if (!transcoder_is_dsi(cpu_transcoder)) 5532 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5533 5534 if (INTEL_GEN(dev_priv) >= 9) 5535 skylake_scaler_disable(intel_crtc); 5536 else 5537 ironlake_pfit_disable(intel_crtc, false); 5538 5539 if (!transcoder_is_dsi(cpu_transcoder)) 5540 intel_ddi_disable_pipe_clock(intel_crtc->config); 5541 5542 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5543 5544 if (old_crtc_state->has_pch_encoder) 5545 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5546 true); 5547 } 5548 5549 static void i9xx_pfit_enable(struct intel_crtc *crtc) 5550 { 5551 struct drm_device *dev = crtc->base.dev; 5552 struct drm_i915_private *dev_priv = to_i915(dev); 5553 struct intel_crtc_state *pipe_config = crtc->config; 5554 5555 if (!pipe_config->gmch_pfit.control) 5556 return; 5557 5558 /* 5559 * The panel fitter should only be adjusted whilst the pipe is disabled, 5560 * according to register description and PRM. 
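	 * Hence the WARN_ON()/assert_pipe_disabled() checks below before the
	 * PFIT registers are written.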
5561 */ 5562 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 5563 assert_pipe_disabled(dev_priv, crtc->pipe); 5564 5565 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 5566 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 5567 5568 /* Border color in case we don't scale up to the full screen. Black by 5569 * default, change to something else for debugging. */ 5570 I915_WRITE(BCLRPAT(crtc->pipe), 0); 5571 } 5572 5573 enum intel_display_power_domain intel_port_to_power_domain(enum port port) 5574 { 5575 switch (port) { 5576 case PORT_A: 5577 return POWER_DOMAIN_PORT_DDI_A_LANES; 5578 case PORT_B: 5579 return POWER_DOMAIN_PORT_DDI_B_LANES; 5580 case PORT_C: 5581 return POWER_DOMAIN_PORT_DDI_C_LANES; 5582 case PORT_D: 5583 return POWER_DOMAIN_PORT_DDI_D_LANES; 5584 case PORT_E: 5585 return POWER_DOMAIN_PORT_DDI_E_LANES; 5586 default: 5587 MISSING_CASE(port); 5588 return POWER_DOMAIN_PORT_OTHER; 5589 } 5590 } 5591 5592 static u64 get_crtc_power_domains(struct drm_crtc *crtc, 5593 struct intel_crtc_state *crtc_state) 5594 { 5595 struct drm_device *dev = crtc->dev; 5596 struct drm_i915_private *dev_priv = to_i915(dev); 5597 struct drm_encoder *encoder; 5598 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5599 enum i915_pipe pipe = intel_crtc->pipe; 5600 u64 mask; 5601 enum transcoder transcoder = crtc_state->cpu_transcoder; 5602 5603 if (!crtc_state->base.active) 5604 return 0; 5605 5606 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5607 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5608 if (crtc_state->pch_pfit.enabled || 5609 crtc_state->pch_pfit.force_thru) 5610 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5611 5612 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { 5613 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 5614 5615 mask |= BIT_ULL(intel_encoder->power_domain); 5616 } 5617 5618 if (HAS_DDI(dev_priv) && crtc_state->has_audio) 5619 mask |= BIT(POWER_DOMAIN_AUDIO); 5620 5621 if (crtc_state->shared_dpll) 5622 mask |= BIT_ULL(POWER_DOMAIN_PLLS); 5623 5624 return mask; 5625 } 5626 5627 static u64 5628 modeset_get_crtc_power_domains(struct drm_crtc *crtc, 5629 struct intel_crtc_state *crtc_state) 5630 { 5631 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5632 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5633 enum intel_display_power_domain domain; 5634 u64 domains, new_domains, old_domains; 5635 5636 old_domains = intel_crtc->enabled_power_domains; 5637 intel_crtc->enabled_power_domains = new_domains = 5638 get_crtc_power_domains(crtc, crtc_state); 5639 5640 domains = new_domains & ~old_domains; 5641 5642 for_each_power_domain(domain, domains) 5643 intel_display_power_get(dev_priv, domain); 5644 5645 return old_domains & ~new_domains; 5646 } 5647 5648 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 5649 u64 domains) 5650 { 5651 enum intel_display_power_domain domain; 5652 5653 for_each_power_domain(domain, domains) 5654 intel_display_power_put(dev_priv, domain); 5655 } 5656 5657 static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, 5658 struct drm_atomic_state *old_state) 5659 { 5660 struct intel_atomic_state *old_intel_state = 5661 to_intel_atomic_state(old_state); 5662 struct drm_crtc *crtc = pipe_config->base.crtc; 5663 struct drm_device *dev = crtc->dev; 5664 struct drm_i915_private *dev_priv = to_i915(dev); 5665 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5666 int pipe = intel_crtc->pipe; 5667 5668 if (WARN_ON(intel_crtc->active)) 5669 
return; 5670 5671 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5672 intel_dp_set_m_n(intel_crtc, M1_N1); 5673 5674 intel_set_pipe_timings(intel_crtc); 5675 intel_set_pipe_src_size(intel_crtc); 5676 5677 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 5678 struct drm_i915_private *dev_priv = to_i915(dev); 5679 5680 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 5681 I915_WRITE(CHV_CANVAS(pipe), 0); 5682 } 5683 5684 i9xx_set_pipeconf(intel_crtc); 5685 5686 intel_crtc->active = true; 5687 5688 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5689 5690 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state); 5691 5692 if (IS_CHERRYVIEW(dev_priv)) { 5693 chv_prepare_pll(intel_crtc, intel_crtc->config); 5694 chv_enable_pll(intel_crtc, intel_crtc->config); 5695 } else { 5696 vlv_prepare_pll(intel_crtc, intel_crtc->config); 5697 vlv_enable_pll(intel_crtc, intel_crtc->config); 5698 } 5699 5700 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5701 5702 i9xx_pfit_enable(intel_crtc); 5703 5704 intel_color_load_luts(&pipe_config->base); 5705 5706 dev_priv->display.initial_watermarks(old_intel_state, 5707 pipe_config); 5708 intel_enable_pipe(intel_crtc); 5709 5710 assert_vblank_disabled(crtc); 5711 drm_crtc_vblank_on(crtc); 5712 5713 intel_encoders_enable(crtc, pipe_config, old_state); 5714 } 5715 5716 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 5717 { 5718 struct drm_device *dev = crtc->base.dev; 5719 struct drm_i915_private *dev_priv = to_i915(dev); 5720 5721 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 5722 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 5723 } 5724 5725 static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config, 5726 struct drm_atomic_state *old_state) 5727 { 5728 struct drm_crtc *crtc = pipe_config->base.crtc; 5729 struct drm_device *dev = crtc->dev; 5730 struct drm_i915_private *dev_priv = to_i915(dev); 5731 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5732 enum i915_pipe pipe = intel_crtc->pipe; 5733 5734 if (WARN_ON(intel_crtc->active)) 5735 return; 5736 5737 i9xx_set_pll_dividers(intel_crtc); 5738 5739 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 5740 intel_dp_set_m_n(intel_crtc, M1_N1); 5741 5742 intel_set_pipe_timings(intel_crtc); 5743 intel_set_pipe_src_size(intel_crtc); 5744 5745 i9xx_set_pipeconf(intel_crtc); 5746 5747 intel_crtc->active = true; 5748 5749 if (!IS_GEN2(dev_priv)) 5750 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5751 5752 intel_encoders_pre_enable(crtc, pipe_config, old_state); 5753 5754 i9xx_enable_pll(intel_crtc); 5755 5756 i9xx_pfit_enable(intel_crtc); 5757 5758 intel_color_load_luts(&pipe_config->base); 5759 5760 intel_update_watermarks(intel_crtc); 5761 intel_enable_pipe(intel_crtc); 5762 5763 assert_vblank_disabled(crtc); 5764 drm_crtc_vblank_on(crtc); 5765 5766 intel_encoders_enable(crtc, pipe_config, old_state); 5767 } 5768 5769 static void i9xx_pfit_disable(struct intel_crtc *crtc) 5770 { 5771 struct drm_device *dev = crtc->base.dev; 5772 struct drm_i915_private *dev_priv = to_i915(dev); 5773 5774 if (!crtc->config->gmch_pfit.control) 5775 return; 5776 5777 assert_pipe_disabled(dev_priv, crtc->pipe); 5778 5779 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 5780 I915_READ(PFIT_CONTROL)); 5781 I915_WRITE(PFIT_CONTROL, 0); 5782 } 5783 5784 static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, 5785 struct drm_atomic_state *old_state) 5786 { 5787 struct drm_crtc *crtc = old_crtc_state->base.crtc; 5788 struct 
drm_device *dev = crtc->dev; 5789 struct drm_i915_private *dev_priv = to_i915(dev); 5790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5791 int pipe = intel_crtc->pipe; 5792 5793 /* 5794 * On gen2 planes are double buffered but the pipe isn't, so we must 5795 * wait for planes to fully turn off before disabling the pipe. 5796 */ 5797 if (IS_GEN2(dev_priv)) 5798 intel_wait_for_vblank(dev_priv, pipe); 5799 5800 intel_encoders_disable(crtc, old_crtc_state, old_state); 5801 5802 drm_crtc_vblank_off(crtc); 5803 assert_vblank_disabled(crtc); 5804 5805 intel_disable_pipe(intel_crtc); 5806 5807 i9xx_pfit_disable(intel_crtc); 5808 5809 intel_encoders_post_disable(crtc, old_crtc_state, old_state); 5810 5811 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) { 5812 if (IS_CHERRYVIEW(dev_priv)) 5813 chv_disable_pll(dev_priv, pipe); 5814 else if (IS_VALLEYVIEW(dev_priv)) 5815 vlv_disable_pll(dev_priv, pipe); 5816 else 5817 i9xx_disable_pll(intel_crtc); 5818 } 5819 5820 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state); 5821 5822 if (!IS_GEN2(dev_priv)) 5823 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 5824 5825 if (!dev_priv->display.initial_watermarks) 5826 intel_update_watermarks(intel_crtc); 5827 } 5828 5829 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, 5830 struct drm_modeset_acquire_ctx *ctx) 5831 { 5832 struct intel_encoder *encoder; 5833 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5834 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5835 enum intel_display_power_domain domain; 5836 u64 domains; 5837 struct drm_atomic_state *state; 5838 struct intel_crtc_state *crtc_state; 5839 int ret; 5840 5841 if (!intel_crtc->active) 5842 return; 5843 5844 if (crtc->primary->state->visible) { 5845 WARN_ON(intel_crtc->flip_work); 5846 5847 intel_pre_disable_primary_noatomic(crtc); 5848 5849 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 5850 crtc->primary->state->visible = false; 5851 } 5852 5853 state = drm_atomic_state_alloc(crtc->dev); 5854 if (!state) { 5855 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory", 5856 crtc->base.id, crtc->name); 5857 return; 5858 } 5859 5860 state->acquire_ctx = ctx; 5861 5862 /* Everything's already locked, -EDEADLK can't happen. 
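	 * With all the locks already held, the only remaining failure mode for
	 * intel_atomic_get_crtc_state() and drm_atomic_add_affected_connectors()
	 * below is -ENOMEM, which the WARN_ON() catches.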
	 */
	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	ret = drm_atomic_add_affected_connectors(state, crtc);

	WARN_ON(IS_ERR(crtc_state) || ret);

	dev_priv->display.crtc_disable(crtc_state, state);

	drm_atomic_state_put(state);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(intel_crtc);
	intel_disable_shared_dpll(intel_crtc);

	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * Turn all CRTCs off, but do not adjust state.
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency).
*/ 5927 static void intel_connector_verify_state(struct intel_connector *connector) 5928 { 5929 struct drm_crtc *crtc = connector->base.state->crtc; 5930 5931 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 5932 connector->base.base.id, 5933 connector->base.name); 5934 5935 if (connector->get_hw_state(connector)) { 5936 struct intel_encoder *encoder = connector->encoder; 5937 struct drm_connector_state *conn_state = connector->base.state; 5938 5939 I915_STATE_WARN(!crtc, 5940 "connector enabled without attached crtc\n"); 5941 5942 if (!crtc) 5943 return; 5944 5945 I915_STATE_WARN(!crtc->state->active, 5946 "connector is active, but attached crtc isn't\n"); 5947 5948 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 5949 return; 5950 5951 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 5952 "atomic encoder doesn't match attached encoder\n"); 5953 5954 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 5955 "attached encoder crtc differs from connector crtc\n"); 5956 } else { 5957 I915_STATE_WARN(crtc && crtc->state->active, 5958 "attached crtc is active, but connector isn't\n"); 5959 I915_STATE_WARN(!crtc && connector->base.state->best_encoder, 5960 "best encoder set without crtc!\n"); 5961 } 5962 } 5963 5964 int intel_connector_init(struct intel_connector *connector) 5965 { 5966 drm_atomic_helper_connector_reset(&connector->base); 5967 5968 if (!connector->base.state) 5969 return -ENOMEM; 5970 5971 return 0; 5972 } 5973 5974 struct intel_connector *intel_connector_alloc(void) 5975 { 5976 struct intel_connector *connector; 5977 5978 connector = kzalloc(sizeof *connector, GFP_KERNEL); 5979 if (!connector) 5980 return NULL; 5981 5982 if (intel_connector_init(connector) < 0) { 5983 kfree(connector); 5984 return NULL; 5985 } 5986 5987 return connector; 5988 } 5989 5990 /* Simple connector->get_hw_state implementation for encoders that support only 5991 * one connector and no cloning and hence the encoder state determines the state 5992 * of the connector. 
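 * (LVDS is the classic example: one panel permanently attached to one
 * encoder.)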
*/ 5993 bool intel_connector_get_hw_state(struct intel_connector *connector) 5994 { 5995 enum i915_pipe pipe = 0; 5996 struct intel_encoder *encoder = connector->encoder; 5997 5998 return encoder->get_hw_state(encoder, &pipe); 5999 } 6000 6001 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6002 { 6003 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6004 return crtc_state->fdi_lanes; 6005 6006 return 0; 6007 } 6008 6009 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, 6010 struct intel_crtc_state *pipe_config) 6011 { 6012 struct drm_i915_private *dev_priv = to_i915(dev); 6013 struct drm_atomic_state *state = pipe_config->base.state; 6014 struct intel_crtc *other_crtc; 6015 struct intel_crtc_state *other_crtc_state; 6016 6017 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 6018 pipe_name(pipe), pipe_config->fdi_lanes); 6019 if (pipe_config->fdi_lanes > 4) { 6020 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 6021 pipe_name(pipe), pipe_config->fdi_lanes); 6022 return -EINVAL; 6023 } 6024 6025 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 6026 if (pipe_config->fdi_lanes > 2) { 6027 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 6028 pipe_config->fdi_lanes); 6029 return -EINVAL; 6030 } else { 6031 return 0; 6032 } 6033 } 6034 6035 if (INTEL_INFO(dev_priv)->num_pipes == 2) 6036 return 0; 6037 6038 /* Ivybridge 3 pipe is really complicated */ 6039 switch (pipe) { 6040 case PIPE_A: 6041 return 0; 6042 case PIPE_B: 6043 if (pipe_config->fdi_lanes <= 2) 6044 return 0; 6045 6046 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C); 6047 other_crtc_state = 6048 intel_atomic_get_crtc_state(state, other_crtc); 6049 if (IS_ERR(other_crtc_state)) 6050 return PTR_ERR(other_crtc_state); 6051 6052 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 6053 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 6054 pipe_name(pipe), pipe_config->fdi_lanes); 6055 return -EINVAL; 6056 } 6057 return 0; 6058 case PIPE_C: 6059 if (pipe_config->fdi_lanes > 2) { 6060 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6061 pipe_name(pipe), pipe_config->fdi_lanes); 6062 return -EINVAL; 6063 } 6064 6065 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B); 6066 other_crtc_state = 6067 intel_atomic_get_crtc_state(state, other_crtc); 6068 if (IS_ERR(other_crtc_state)) 6069 return PTR_ERR(other_crtc_state); 6070 6071 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6072 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6073 return -EINVAL; 6074 } 6075 return 0; 6076 default: 6077 BUG(); 6078 } 6079 } 6080 6081 #define RETRY 1 6082 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6083 struct intel_crtc_state *pipe_config) 6084 { 6085 struct drm_device *dev = intel_crtc->base.dev; 6086 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6087 int lane, link_bw, fdi_dotclock, ret; 6088 bool needs_recompute = false; 6089 6090 retry: 6091 /* FDI is a binary signal running at ~2.7GHz, encoding 6092 * each output octet as 10 bits. The actual frequency 6093 * is stored as a divider into a 100MHz clock, and the 6094 * mode pixel clock is stored in units of 1KHz. 
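	 * (With the 10 bit encoding each lane carries 2.7 Gbit/s * 8/10 =
	 * 2.16 Gbit/s of payload; e.g. a 148.5 MHz mode at 24 bpp needs about
	 * 3.6 Gbit/s and therefore two FDI lanes.)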
6095 * Hence the bw of each lane in terms of the mode signal 6096 * is: 6097 */ 6098 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 6099 6100 fdi_dotclock = adjusted_mode->crtc_clock; 6101 6102 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 6103 pipe_config->pipe_bpp); 6104 6105 pipe_config->fdi_lanes = lane; 6106 6107 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6108 link_bw, &pipe_config->fdi_m_n, false); 6109 6110 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6111 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6112 pipe_config->pipe_bpp -= 2*3; 6113 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6114 pipe_config->pipe_bpp); 6115 needs_recompute = true; 6116 pipe_config->bw_constrained = true; 6117 6118 goto retry; 6119 } 6120 6121 if (needs_recompute) 6122 return RETRY; 6123 6124 return ret; 6125 } 6126 6127 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, 6128 struct intel_crtc_state *pipe_config) 6129 { 6130 if (pipe_config->pipe_bpp > 24) 6131 return false; 6132 6133 /* HSW can handle pixel rate up to cdclk? */ 6134 if (IS_HASWELL(dev_priv)) 6135 return true; 6136 6137 /* 6138 * We compare against max which means we must take 6139 * the increased cdclk requirement into account when 6140 * calculating the new cdclk. 6141 * 6142 * Should measure whether using a lower cdclk w/o IPS 6143 */ 6144 return pipe_config->pixel_rate <= 6145 dev_priv->max_cdclk_freq * 95 / 100; 6146 } 6147 6148 static void hsw_compute_ips_config(struct intel_crtc *crtc, 6149 struct intel_crtc_state *pipe_config) 6150 { 6151 struct drm_device *dev = crtc->base.dev; 6152 struct drm_i915_private *dev_priv = to_i915(dev); 6153 6154 pipe_config->ips_enabled = i915.enable_ips && 6155 hsw_crtc_supports_ips(crtc) && 6156 pipe_config_supports_ips(dev_priv, pipe_config); 6157 } 6158 6159 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 6160 { 6161 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6162 6163 /* GDG double wide on either pipe, otherwise pipe A only */ 6164 return INTEL_INFO(dev_priv)->gen < 4 && 6165 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6166 } 6167 6168 static uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config) 6169 { 6170 uint32_t pixel_rate; 6171 6172 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock; 6173 6174 /* 6175 * We only use IF-ID interlacing. If we ever use 6176 * PF-ID we'll need to adjust the pixel_rate here. 
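	 * When the PCH panel fitter downscales, the pipe still fetches the full
	 * source size, so the pixel rate below is scaled up by
	 * (pipe_src_w * pipe_src_h) / (pfit_w * pfit_h); e.g. fitting a
	 * 1920x1080 source into a 1280x720 window multiplies it by 2.25.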
6177 */ 6178 6179 if (pipe_config->pch_pfit.enabled) { 6180 uint64_t pipe_w, pipe_h, pfit_w, pfit_h; 6181 uint32_t pfit_size = pipe_config->pch_pfit.size; 6182 6183 pipe_w = pipe_config->pipe_src_w; 6184 pipe_h = pipe_config->pipe_src_h; 6185 6186 pfit_w = (pfit_size >> 16) & 0xFFFF; 6187 pfit_h = pfit_size & 0xFFFF; 6188 if (pipe_w < pfit_w) 6189 pipe_w = pfit_w; 6190 if (pipe_h < pfit_h) 6191 pipe_h = pfit_h; 6192 6193 if (WARN_ON(!pfit_w || !pfit_h)) 6194 return pixel_rate; 6195 6196 pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h, 6197 pfit_w * pfit_h); 6198 } 6199 6200 return pixel_rate; 6201 } 6202 6203 static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) 6204 { 6205 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); 6206 6207 if (HAS_GMCH_DISPLAY(dev_priv)) 6208 /* FIXME calculate proper pipe pixel rate for GMCH pfit */ 6209 crtc_state->pixel_rate = 6210 crtc_state->base.adjusted_mode.crtc_clock; 6211 else 6212 crtc_state->pixel_rate = 6213 ilk_pipe_pixel_rate(crtc_state); 6214 } 6215 6216 static int intel_crtc_compute_config(struct intel_crtc *crtc, 6217 struct intel_crtc_state *pipe_config) 6218 { 6219 struct drm_device *dev = crtc->base.dev; 6220 struct drm_i915_private *dev_priv = to_i915(dev); 6221 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6222 int clock_limit = dev_priv->max_dotclk_freq; 6223 6224 if (INTEL_GEN(dev_priv) < 4) { 6225 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6226 6227 /* 6228 * Enable double wide mode when the dot clock 6229 * is > 90% of the (display) core speed. 6230 */ 6231 if (intel_crtc_supports_double_wide(crtc) && 6232 adjusted_mode->crtc_clock > clock_limit) { 6233 clock_limit = dev_priv->max_dotclk_freq; 6234 pipe_config->double_wide = true; 6235 } 6236 } 6237 6238 if (adjusted_mode->crtc_clock > clock_limit) { 6239 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6240 adjusted_mode->crtc_clock, clock_limit, 6241 yesno(pipe_config->double_wide)); 6242 return -EINVAL; 6243 } 6244 6245 /* 6246 * Pipe horizontal size must be even in: 6247 * - DVO ganged mode 6248 * - LVDS dual channel mode 6249 * - Double wide pipe 6250 */ 6251 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6252 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6253 pipe_config->pipe_src_w &= ~1; 6254 6255 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6256 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6257 */ 6258 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) && 6259 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6260 return -EINVAL; 6261 6262 intel_crtc_compute_pixel_rate(pipe_config); 6263 6264 if (HAS_IPS(dev_priv)) 6265 hsw_compute_ips_config(crtc, pipe_config); 6266 6267 if (pipe_config->has_pch_encoder) 6268 return ironlake_fdi_compute_config(crtc, pipe_config); 6269 6270 return 0; 6271 } 6272 6273 static void 6274 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 6275 { 6276 while (*num > DATA_LINK_M_N_MASK || 6277 *den > DATA_LINK_M_N_MASK) { 6278 *num >>= 1; 6279 *den >>= 1; 6280 } 6281 } 6282 6283 static void compute_m_n(unsigned int m, unsigned int n, 6284 uint32_t *ret_m, uint32_t *ret_n, 6285 bool reduce_m_n) 6286 { 6287 /* 6288 * Reduce M/N as much as possible without loss in precision. Several DP 6289 * dongles in particular seem to be fussy about too large *link* M/N 6290 * values. 
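	 * (Link M/N is simply the pixel clock : link clock ratio, e.g.
	 * 148500 : 270000 for a 148.5 MHz mode on an HBR (270 MHz link clock)
	 * DP link, before any reduction.)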
The passed in values are more likely to have the least 6291 * significant bits zero than M after rounding below, so do this first. 6292 */ 6293 if (reduce_m_n) { 6294 while ((m & 1) == 0 && (n & 1) == 0) { 6295 m >>= 1; 6296 n >>= 1; 6297 } 6298 } 6299 6300 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 6301 *ret_m = div_u64((uint64_t) m * *ret_n, n); 6302 intel_reduce_m_n_ratio(ret_m, ret_n); 6303 } 6304 6305 void 6306 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 6307 int pixel_clock, int link_clock, 6308 struct intel_link_m_n *m_n, 6309 bool reduce_m_n) 6310 { 6311 m_n->tu = 64; 6312 6313 compute_m_n(bits_per_pixel * pixel_clock, 6314 link_clock * nlanes * 8, 6315 &m_n->gmch_m, &m_n->gmch_n, 6316 reduce_m_n); 6317 6318 compute_m_n(pixel_clock, link_clock, 6319 &m_n->link_m, &m_n->link_n, 6320 reduce_m_n); 6321 } 6322 6323 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 6324 { 6325 if (i915.panel_use_ssc >= 0) 6326 return i915.panel_use_ssc != 0; 6327 return dev_priv->vbt.lvds_use_ssc 6328 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 6329 } 6330 6331 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 6332 { 6333 return (1 << dpll->n) << 16 | dpll->m2; 6334 } 6335 6336 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 6337 { 6338 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 6339 } 6340 6341 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 6342 struct intel_crtc_state *crtc_state, 6343 struct dpll *reduced_clock) 6344 { 6345 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6346 u32 fp, fp2 = 0; 6347 6348 if (IS_PINEVIEW(dev_priv)) { 6349 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 6350 if (reduced_clock) 6351 fp2 = pnv_dpll_compute_fp(reduced_clock); 6352 } else { 6353 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 6354 if (reduced_clock) 6355 fp2 = i9xx_dpll_compute_fp(reduced_clock); 6356 } 6357 6358 crtc_state->dpll_hw_state.fp0 = fp; 6359 6360 crtc->lowfreq_avail = false; 6361 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6362 reduced_clock) { 6363 crtc_state->dpll_hw_state.fp1 = fp2; 6364 crtc->lowfreq_avail = true; 6365 } else { 6366 crtc_state->dpll_hw_state.fp1 = fp; 6367 } 6368 } 6369 6370 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 6371 pipe) 6372 { 6373 u32 reg_val; 6374 6375 /* 6376 * PLLB opamp always calibrates to max value of 0x3f, force enable it 6377 * and set it to a reasonable value instead. 
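	 * That is what the VLV_PLL_DW9(1)/VLV_REF_DW13 read-modify-write
	 * sequence below does.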
6378 */ 6379 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6380 reg_val &= 0xffffff00; 6381 reg_val |= 0x00000030; 6382 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6383 6384 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6385 reg_val &= 0x8cffffff; 6386 reg_val = 0x8c000000; 6387 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6388 6389 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 6390 reg_val &= 0xffffff00; 6391 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 6392 6393 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 6394 reg_val &= 0x00ffffff; 6395 reg_val |= 0xb0000000; 6396 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 6397 } 6398 6399 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 6400 struct intel_link_m_n *m_n) 6401 { 6402 struct drm_device *dev = crtc->base.dev; 6403 struct drm_i915_private *dev_priv = to_i915(dev); 6404 int pipe = crtc->pipe; 6405 6406 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6407 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 6408 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 6409 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 6410 } 6411 6412 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 6413 struct intel_link_m_n *m_n, 6414 struct intel_link_m_n *m2_n2) 6415 { 6416 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6417 int pipe = crtc->pipe; 6418 enum transcoder transcoder = crtc->config->cpu_transcoder; 6419 6420 if (INTEL_GEN(dev_priv) >= 5) { 6421 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 6422 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 6423 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 6424 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 6425 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 6426 * for gen < 8) and if DRRS is supported (to make sure the 6427 * registers are not unnecessarily accessed). 6428 */ 6429 if (m2_n2 && (IS_CHERRYVIEW(dev_priv) || 6430 INTEL_GEN(dev_priv) < 8) && crtc->config->has_drrs) { 6431 I915_WRITE(PIPE_DATA_M2(transcoder), 6432 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 6433 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 6434 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 6435 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 6436 } 6437 } else { 6438 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 6439 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 6440 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 6441 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 6442 } 6443 } 6444 6445 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 6446 { 6447 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 6448 6449 if (m_n == M1_N1) { 6450 dp_m_n = &crtc->config->dp_m_n; 6451 dp_m2_n2 = &crtc->config->dp_m2_n2; 6452 } else if (m_n == M2_N2) { 6453 6454 /* 6455 * M2_N2 registers are not supported. Hence m2_n2 divider value 6456 * needs to be programmed into M1_N1. 
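		 * (This is used by the DRRS refresh rate switch on platforms
		 * that lack separate M2/N2 registers: the downclocked divider
		 * is written into the M1/N1 registers instead.)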
6457 */ 6458 dp_m_n = &crtc->config->dp_m2_n2; 6459 } else { 6460 DRM_ERROR("Unsupported divider value\n"); 6461 return; 6462 } 6463 6464 if (crtc->config->has_pch_encoder) 6465 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 6466 else 6467 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 6468 } 6469 6470 static void vlv_compute_dpll(struct intel_crtc *crtc, 6471 struct intel_crtc_state *pipe_config) 6472 { 6473 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 6474 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6475 if (crtc->pipe != PIPE_A) 6476 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6477 6478 /* DPLL not used with DSI, but still need the rest set up */ 6479 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6480 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 6481 DPLL_EXT_BUFFER_ENABLE_VLV; 6482 6483 pipe_config->dpll_hw_state.dpll_md = 6484 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6485 } 6486 6487 static void chv_compute_dpll(struct intel_crtc *crtc, 6488 struct intel_crtc_state *pipe_config) 6489 { 6490 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 6491 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 6492 if (crtc->pipe != PIPE_A) 6493 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6494 6495 /* DPLL not used with DSI, but still need the rest set up */ 6496 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 6497 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 6498 6499 pipe_config->dpll_hw_state.dpll_md = 6500 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6501 } 6502 6503 static void vlv_prepare_pll(struct intel_crtc *crtc, 6504 const struct intel_crtc_state *pipe_config) 6505 { 6506 struct drm_device *dev = crtc->base.dev; 6507 struct drm_i915_private *dev_priv = to_i915(dev); 6508 enum i915_pipe pipe = crtc->pipe; 6509 u32 mdiv; 6510 u32 bestn, bestm1, bestm2, bestp1, bestp2; 6511 u32 coreclk, reg_val; 6512 6513 /* Enable Refclk */ 6514 I915_WRITE(DPLL(pipe), 6515 pipe_config->dpll_hw_state.dpll & 6516 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 6517 6518 /* No need to actually set up the DPLL with DSI */ 6519 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6520 return; 6521 6522 mutex_lock(&dev_priv->sb_lock); 6523 6524 bestn = pipe_config->dpll.n; 6525 bestm1 = pipe_config->dpll.m1; 6526 bestm2 = pipe_config->dpll.m2; 6527 bestp1 = pipe_config->dpll.p1; 6528 bestp2 = pipe_config->dpll.p2; 6529 6530 /* See eDP HDMI DPIO driver vbios notes doc */ 6531 6532 /* PLL B needs special handling */ 6533 if (pipe == PIPE_B) 6534 vlv_pllb_recal_opamp(dev_priv, pipe); 6535 6536 /* Set up Tx target for periodic Rcomp update */ 6537 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 6538 6539 /* Disable target IRef on PLL */ 6540 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 6541 reg_val &= 0x00ffffff; 6542 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 6543 6544 /* Disable fast lock */ 6545 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 6546 6547 /* Set idtafcrecal before PLL is enabled */ 6548 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 6549 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 6550 mdiv |= ((bestn << DPIO_N_SHIFT)); 6551 mdiv |= (1 << DPIO_K_SHIFT); 6552 6553 /* 6554 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 6555 * but we don't support that). 
6556 * Note: don't use the DAC post divider as it seems unstable. 6557 */ 6558 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 6559 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6560 6561 mdiv |= DPIO_ENABLE_CALIBRATION; 6562 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 6563 6564 /* Set HBR and RBR LPF coefficients */ 6565 if (pipe_config->port_clock == 162000 || 6566 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 6567 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 6568 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6569 0x009f0003); 6570 else 6571 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 6572 0x00d0000f); 6573 6574 if (intel_crtc_has_dp_encoder(pipe_config)) { 6575 /* Use SSC source */ 6576 if (pipe == PIPE_A) 6577 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6578 0x0df40000); 6579 else 6580 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6581 0x0df70000); 6582 } else { /* HDMI or VGA */ 6583 /* Use bend source */ 6584 if (pipe == PIPE_A) 6585 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6586 0x0df70000); 6587 else 6588 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6589 0x0df40000); 6590 } 6591 6592 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 6593 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 6594 if (intel_crtc_has_dp_encoder(crtc->config)) 6595 coreclk |= 0x01000000; 6596 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 6597 6598 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 6599 mutex_unlock(&dev_priv->sb_lock); 6600 } 6601 6602 static void chv_prepare_pll(struct intel_crtc *crtc, 6603 const struct intel_crtc_state *pipe_config) 6604 { 6605 struct drm_device *dev = crtc->base.dev; 6606 struct drm_i915_private *dev_priv = to_i915(dev); 6607 enum i915_pipe pipe = crtc->pipe; 6608 enum dpio_channel port = vlv_pipe_to_channel(pipe); 6609 u32 loopfilter, tribuf_calcntr; 6610 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 6611 u32 dpio_val; 6612 int vco; 6613 6614 /* Enable Refclk and SSC */ 6615 I915_WRITE(DPLL(pipe), 6616 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 6617 6618 /* No need to actually set up the DPLL with DSI */ 6619 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 6620 return; 6621 6622 bestn = pipe_config->dpll.n; 6623 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 6624 bestm1 = pipe_config->dpll.m1; 6625 bestm2 = pipe_config->dpll.m2 >> 22; 6626 bestp1 = pipe_config->dpll.p1; 6627 bestp2 = pipe_config->dpll.p2; 6628 vco = pipe_config->dpll.vco; 6629 dpio_val = 0; 6630 loopfilter = 0; 6631 6632 mutex_lock(&dev_priv->sb_lock); 6633 6634 /* p1 and p2 divider */ 6635 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 6636 5 << DPIO_CHV_S1_DIV_SHIFT | 6637 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 6638 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 6639 1 << DPIO_CHV_K_DIV_SHIFT); 6640 6641 /* Feedback post-divider - m2 */ 6642 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 6643 6644 /* Feedback refclk divider - n and m1 */ 6645 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 6646 DPIO_CHV_M1_DIV_BY_2 | 6647 1 << DPIO_CHV_N_DIV_SHIFT); 6648 6649 /* M2 fraction division */ 6650 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 6651 6652 /* M2 fraction division enable */ 6653 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 6654 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 6655 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 6656 if (bestm2_frac) 6657 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 6658 
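	/*
	 * The CHV_PLL_DW3 write below commits the feed-forward gain together
	 * with the fractional-divide enable derived from bestm2_frac (the low
	 * 22 bits of m2, split off above).
	 */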
vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 6659 6660 /* Program digital lock detect threshold */ 6661 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 6662 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 6663 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 6664 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 6665 if (!bestm2_frac) 6666 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 6667 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 6668 6669 /* Loop filter */ 6670 if (vco == 5400000) { 6671 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 6672 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 6673 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 6674 tribuf_calcntr = 0x9; 6675 } else if (vco <= 6200000) { 6676 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 6677 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 6678 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6679 tribuf_calcntr = 0x9; 6680 } else if (vco <= 6480000) { 6681 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6682 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6683 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6684 tribuf_calcntr = 0x8; 6685 } else { 6686 /* Not supported. Apply the same limits as in the max case */ 6687 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 6688 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 6689 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 6690 tribuf_calcntr = 0; 6691 } 6692 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 6693 6694 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 6695 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 6696 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 6697 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 6698 6699 /* AFC Recal */ 6700 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 6701 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 6702 DPIO_AFC_RECAL); 6703 6704 mutex_unlock(&dev_priv->sb_lock); 6705 } 6706 6707 /** 6708 * vlv_force_pll_on - forcibly enable just the PLL 6709 * @dev_priv: i915 private structure 6710 * @pipe: pipe PLL to enable 6711 * @dpll: PLL configuration 6712 * 6713 * Enable the PLL for @pipe using the supplied @dpll config. To be used 6714 * in cases where we need the PLL enabled even when @pipe is not going to 6715 * be enabled. 6716 */ 6717 int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 6718 const struct dpll *dpll) 6719 { 6720 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 6721 struct intel_crtc_state *pipe_config; 6722 6723 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 6724 if (!pipe_config) 6725 return -ENOMEM; 6726 6727 pipe_config->base.crtc = &crtc->base; 6728 pipe_config->pixel_multiplier = 1; 6729 pipe_config->dpll = *dpll; 6730 6731 if (IS_CHERRYVIEW(dev_priv)) { 6732 chv_compute_dpll(crtc, pipe_config); 6733 chv_prepare_pll(crtc, pipe_config); 6734 chv_enable_pll(crtc, pipe_config); 6735 } else { 6736 vlv_compute_dpll(crtc, pipe_config); 6737 vlv_prepare_pll(crtc, pipe_config); 6738 vlv_enable_pll(crtc, pipe_config); 6739 } 6740 6741 kfree(pipe_config); 6742 6743 return 0; 6744 } 6745 6746 /** 6747 * vlv_force_pll_off - forcibly disable just the PLL 6748 * @dev_priv: i915 private structure 6749 * @pipe: pipe PLL to disable 6750 * 6751 * Disable the PLL for @pipe. To be used in cases where we need 6752 * the PLL enabled even when @pipe is not going to be enabled. 
6753 */ 6754 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 6755 { 6756 if (IS_CHERRYVIEW(dev_priv)) 6757 chv_disable_pll(dev_priv, pipe); 6758 else 6759 vlv_disable_pll(dev_priv, pipe); 6760 } 6761 6762 static void i9xx_compute_dpll(struct intel_crtc *crtc, 6763 struct intel_crtc_state *crtc_state, 6764 struct dpll *reduced_clock) 6765 { 6766 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6767 u32 dpll; 6768 struct dpll *clock = &crtc_state->dpll; 6769 6770 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6771 6772 dpll = DPLL_VGA_MODE_DIS; 6773 6774 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 6775 dpll |= DPLLB_MODE_LVDS; 6776 else 6777 dpll |= DPLLB_MODE_DAC_SERIAL; 6778 6779 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 6780 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 6781 dpll |= (crtc_state->pixel_multiplier - 1) 6782 << SDVO_MULTIPLIER_SHIFT_HIRES; 6783 } 6784 6785 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 6786 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 6787 dpll |= DPLL_SDVO_HIGH_SPEED; 6788 6789 if (intel_crtc_has_dp_encoder(crtc_state)) 6790 dpll |= DPLL_SDVO_HIGH_SPEED; 6791 6792 /* compute bitmask from p1 value */ 6793 if (IS_PINEVIEW(dev_priv)) 6794 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; 6795 else { 6796 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6797 if (IS_G4X(dev_priv) && reduced_clock) 6798 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 6799 } 6800 switch (clock->p2) { 6801 case 5: 6802 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 6803 break; 6804 case 7: 6805 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 6806 break; 6807 case 10: 6808 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 6809 break; 6810 case 14: 6811 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 6812 break; 6813 } 6814 if (INTEL_GEN(dev_priv) >= 4) 6815 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 6816 6817 if (crtc_state->sdvo_tv_clock) 6818 dpll |= PLL_REF_INPUT_TVCLKINBC; 6819 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6820 intel_panel_use_ssc(dev_priv)) 6821 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6822 else 6823 dpll |= PLL_REF_INPUT_DREFCLK; 6824 6825 dpll |= DPLL_VCO_ENABLE; 6826 crtc_state->dpll_hw_state.dpll = dpll; 6827 6828 if (INTEL_GEN(dev_priv) >= 4) { 6829 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 6830 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6831 crtc_state->dpll_hw_state.dpll_md = dpll_md; 6832 } 6833 } 6834 6835 static void i8xx_compute_dpll(struct intel_crtc *crtc, 6836 struct intel_crtc_state *crtc_state, 6837 struct dpll *reduced_clock) 6838 { 6839 struct drm_device *dev = crtc->base.dev; 6840 struct drm_i915_private *dev_priv = to_i915(dev); 6841 u32 dpll; 6842 struct dpll *clock = &crtc_state->dpll; 6843 6844 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6845 6846 dpll = DPLL_VGA_MODE_DIS; 6847 6848 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 6849 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6850 } else { 6851 if (clock->p1 == 2) 6852 dpll |= PLL_P1_DIVIDE_BY_TWO; 6853 else 6854 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6855 if (clock->p2 == 4) 6856 dpll |= PLL_P2_DIVIDE_BY_4; 6857 } 6858 6859 if (!IS_I830(dev_priv) && 6860 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) 6861 dpll |= DPLL_DVO_2X_MODE; 6862 6863 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 6864 intel_panel_use_ssc(dev_priv)) 6865 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6866 else 6867 dpll 
|= PLL_REF_INPUT_DREFCLK; 6868 6869 dpll |= DPLL_VCO_ENABLE; 6870 crtc_state->dpll_hw_state.dpll = dpll; 6871 } 6872 6873 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 6874 { 6875 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 6876 enum i915_pipe pipe = intel_crtc->pipe; 6877 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 6878 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 6879 uint32_t crtc_vtotal, crtc_vblank_end; 6880 int vsyncshift = 0; 6881 6882 /* We need to be careful not to changed the adjusted mode, for otherwise 6883 * the hw state checker will get angry at the mismatch. */ 6884 crtc_vtotal = adjusted_mode->crtc_vtotal; 6885 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 6886 6887 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 6888 /* the chip adds 2 halflines automatically */ 6889 crtc_vtotal -= 1; 6890 crtc_vblank_end -= 1; 6891 6892 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 6893 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6894 else 6895 vsyncshift = adjusted_mode->crtc_hsync_start - 6896 adjusted_mode->crtc_htotal / 2; 6897 if (vsyncshift < 0) 6898 vsyncshift += adjusted_mode->crtc_htotal; 6899 } 6900 6901 if (INTEL_GEN(dev_priv) > 3) 6902 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 6903 6904 I915_WRITE(HTOTAL(cpu_transcoder), 6905 (adjusted_mode->crtc_hdisplay - 1) | 6906 ((adjusted_mode->crtc_htotal - 1) << 16)); 6907 I915_WRITE(HBLANK(cpu_transcoder), 6908 (adjusted_mode->crtc_hblank_start - 1) | 6909 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 6910 I915_WRITE(HSYNC(cpu_transcoder), 6911 (adjusted_mode->crtc_hsync_start - 1) | 6912 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 6913 6914 I915_WRITE(VTOTAL(cpu_transcoder), 6915 (adjusted_mode->crtc_vdisplay - 1) | 6916 ((crtc_vtotal - 1) << 16)); 6917 I915_WRITE(VBLANK(cpu_transcoder), 6918 (adjusted_mode->crtc_vblank_start - 1) | 6919 ((crtc_vblank_end - 1) << 16)); 6920 I915_WRITE(VSYNC(cpu_transcoder), 6921 (adjusted_mode->crtc_vsync_start - 1) | 6922 ((adjusted_mode->crtc_vsync_end - 1) << 16)); 6923 6924 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 6925 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 6926 * documented on the DDI_FUNC_CTL register description, EDP Input Select 6927 * bits. */ 6928 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP && 6929 (pipe == PIPE_B || pipe == PIPE_C)) 6930 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 6931 6932 } 6933 6934 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 6935 { 6936 struct drm_device *dev = intel_crtc->base.dev; 6937 struct drm_i915_private *dev_priv = to_i915(dev); 6938 enum i915_pipe pipe = intel_crtc->pipe; 6939 6940 /* pipesrc controls the size that is scaled from, which should 6941 * always be the user's requested size. 
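	 * PIPESRC is programmed as (width - 1) << 16 | (height - 1), i.e. both
	 * dimensions are written minus one.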
6942 */ 6943 I915_WRITE(PIPESRC(pipe), 6944 ((intel_crtc->config->pipe_src_w - 1) << 16) | 6945 (intel_crtc->config->pipe_src_h - 1)); 6946 } 6947 6948 static void intel_get_pipe_timings(struct intel_crtc *crtc, 6949 struct intel_crtc_state *pipe_config) 6950 { 6951 struct drm_device *dev = crtc->base.dev; 6952 struct drm_i915_private *dev_priv = to_i915(dev); 6953 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 6954 uint32_t tmp; 6955 6956 tmp = I915_READ(HTOTAL(cpu_transcoder)); 6957 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 6958 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 6959 tmp = I915_READ(HBLANK(cpu_transcoder)); 6960 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 6961 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 6962 tmp = I915_READ(HSYNC(cpu_transcoder)); 6963 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 6964 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 6965 6966 tmp = I915_READ(VTOTAL(cpu_transcoder)); 6967 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 6968 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 6969 tmp = I915_READ(VBLANK(cpu_transcoder)); 6970 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 6971 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1; 6972 tmp = I915_READ(VSYNC(cpu_transcoder)); 6973 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 6974 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 6975 6976 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 6977 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 6978 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 6979 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 6980 } 6981 } 6982 6983 static void intel_get_pipe_src_size(struct intel_crtc *crtc, 6984 struct intel_crtc_state *pipe_config) 6985 { 6986 struct drm_device *dev = crtc->base.dev; 6987 struct drm_i915_private *dev_priv = to_i915(dev); 6988 u32 tmp; 6989 6990 tmp = I915_READ(PIPESRC(crtc->pipe)); 6991 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 6992 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 6993 6994 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 6995 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 6996 } 6997 6998 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 6999 struct intel_crtc_state *pipe_config) 7000 { 7001 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7002 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7003 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7004 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7005 7006 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7007 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7008 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7009 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7010 7011 mode->flags = pipe_config->base.adjusted_mode.flags; 7012 mode->type = DRM_MODE_TYPE_DRIVER; 7013 7014 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7015 7016 mode->hsync = drm_mode_hsync(mode); 7017 mode->vrefresh = drm_mode_vrefresh(mode); 7018 drm_mode_set_name(mode); 7019 } 7020 7021 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7022 { 
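	/*
	 * Build PIPECONF from the committed crtc state. On machines with the
	 * QUIRK_PIPEA_FORCE/QUIRK_PIPEB_FORCE quirks the existing
	 * PIPECONF_ENABLE bit is preserved so that this write cannot turn the
	 * pipe off.
	 */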
7023 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 7024 uint32_t pipeconf; 7025 7026 pipeconf = 0; 7027 7028 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 7029 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 7030 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE; 7031 7032 if (intel_crtc->config->double_wide) 7033 pipeconf |= PIPECONF_DOUBLE_WIDE; 7034 7035 /* only g4x and later have fancy bpc/dither controls */ 7036 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7037 IS_CHERRYVIEW(dev_priv)) { 7038 /* Bspec claims that we can't use dithering for 30bpp pipes. */ 7039 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30) 7040 pipeconf |= PIPECONF_DITHER_EN | 7041 PIPECONF_DITHER_TYPE_SP; 7042 7043 switch (intel_crtc->config->pipe_bpp) { 7044 case 18: 7045 pipeconf |= PIPECONF_6BPC; 7046 break; 7047 case 24: 7048 pipeconf |= PIPECONF_8BPC; 7049 break; 7050 case 30: 7051 pipeconf |= PIPECONF_10BPC; 7052 break; 7053 default: 7054 /* Case prevented by intel_choose_pipe_bpp_dither. */ 7055 BUG(); 7056 } 7057 } 7058 7059 if (HAS_PIPE_CXSR(dev_priv)) { 7060 if (intel_crtc->lowfreq_avail) { 7061 DRM_DEBUG_KMS("enabling CxSR downclocking\n"); 7062 pipeconf |= PIPECONF_CXSR_DOWNCLOCK; 7063 } else { 7064 DRM_DEBUG_KMS("disabling CxSR downclocking\n"); 7065 } 7066 } 7067 7068 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7069 if (INTEL_GEN(dev_priv) < 4 || 7070 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 7071 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7072 else 7073 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7074 } else 7075 pipeconf |= PIPECONF_PROGRESSIVE; 7076 7077 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7078 intel_crtc->config->limited_color_range) 7079 pipeconf |= PIPECONF_COLOR_RANGE_SELECT; 7080 7081 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf); 7082 POSTING_READ(PIPECONF(intel_crtc->pipe)); 7083 } 7084 7085 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc, 7086 struct intel_crtc_state *crtc_state) 7087 { 7088 struct drm_device *dev = crtc->base.dev; 7089 struct drm_i915_private *dev_priv = to_i915(dev); 7090 const struct intel_limit *limit; 7091 int refclk = 48000; 7092 7093 memset(&crtc_state->dpll_hw_state, 0, 7094 sizeof(crtc_state->dpll_hw_state)); 7095 7096 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7097 if (intel_panel_use_ssc(dev_priv)) { 7098 refclk = dev_priv->vbt.lvds_ssc_freq; 7099 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7100 } 7101 7102 limit = &intel_limits_i8xx_lvds; 7103 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) { 7104 limit = &intel_limits_i8xx_dvo; 7105 } else { 7106 limit = &intel_limits_i8xx_dac; 7107 } 7108 7109 if (!crtc_state->clock_set && 7110 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7111 refclk, NULL, &crtc_state->dpll)) { 7112 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7113 return -EINVAL; 7114 } 7115 7116 i8xx_compute_dpll(crtc, crtc_state, NULL); 7117 7118 return 0; 7119 } 7120 7121 static int g4x_crtc_compute_clock(struct intel_crtc *crtc, 7122 struct intel_crtc_state *crtc_state) 7123 { 7124 struct drm_device *dev = crtc->base.dev; 7125 struct drm_i915_private *dev_priv = to_i915(dev); 7126 const struct intel_limit *limit; 7127 int refclk = 96000; 7128 7129 memset(&crtc_state->dpll_hw_state, 0, 7130 sizeof(crtc_state->dpll_hw_state)); 7131 7132 if 
(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7133 if (intel_panel_use_ssc(dev_priv)) { 7134 refclk = dev_priv->vbt.lvds_ssc_freq; 7135 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7136 } 7137 7138 if (intel_is_dual_link_lvds(dev)) 7139 limit = &intel_limits_g4x_dual_channel_lvds; 7140 else 7141 limit = &intel_limits_g4x_single_channel_lvds; 7142 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) || 7143 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 7144 limit = &intel_limits_g4x_hdmi; 7145 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) { 7146 limit = &intel_limits_g4x_sdvo; 7147 } else { 7148 /* The option is for other outputs */ 7149 limit = &intel_limits_i9xx_sdvo; 7150 } 7151 7152 if (!crtc_state->clock_set && 7153 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7154 refclk, NULL, &crtc_state->dpll)) { 7155 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7156 return -EINVAL; 7157 } 7158 7159 i9xx_compute_dpll(crtc, crtc_state, NULL); 7160 7161 return 0; 7162 } 7163 7164 static int pnv_crtc_compute_clock(struct intel_crtc *crtc, 7165 struct intel_crtc_state *crtc_state) 7166 { 7167 struct drm_device *dev = crtc->base.dev; 7168 struct drm_i915_private *dev_priv = to_i915(dev); 7169 const struct intel_limit *limit; 7170 int refclk = 96000; 7171 7172 memset(&crtc_state->dpll_hw_state, 0, 7173 sizeof(crtc_state->dpll_hw_state)); 7174 7175 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7176 if (intel_panel_use_ssc(dev_priv)) { 7177 refclk = dev_priv->vbt.lvds_ssc_freq; 7178 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7179 } 7180 7181 limit = &intel_limits_pineview_lvds; 7182 } else { 7183 limit = &intel_limits_pineview_sdvo; 7184 } 7185 7186 if (!crtc_state->clock_set && 7187 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7188 refclk, NULL, &crtc_state->dpll)) { 7189 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7190 return -EINVAL; 7191 } 7192 7193 i9xx_compute_dpll(crtc, crtc_state, NULL); 7194 7195 return 0; 7196 } 7197 7198 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, 7199 struct intel_crtc_state *crtc_state) 7200 { 7201 struct drm_device *dev = crtc->base.dev; 7202 struct drm_i915_private *dev_priv = to_i915(dev); 7203 const struct intel_limit *limit; 7204 int refclk = 96000; 7205 7206 memset(&crtc_state->dpll_hw_state, 0, 7207 sizeof(crtc_state->dpll_hw_state)); 7208 7209 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7210 if (intel_panel_use_ssc(dev_priv)) { 7211 refclk = dev_priv->vbt.lvds_ssc_freq; 7212 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7213 } 7214 7215 limit = &intel_limits_i9xx_lvds; 7216 } else { 7217 limit = &intel_limits_i9xx_sdvo; 7218 } 7219 7220 if (!crtc_state->clock_set && 7221 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7222 refclk, NULL, &crtc_state->dpll)) { 7223 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7224 return -EINVAL; 7225 } 7226 7227 i9xx_compute_dpll(crtc, crtc_state, NULL); 7228 7229 return 0; 7230 } 7231 7232 static int chv_crtc_compute_clock(struct intel_crtc *crtc, 7233 struct intel_crtc_state *crtc_state) 7234 { 7235 int refclk = 100000; 7236 const struct intel_limit *limit = &intel_limits_chv; 7237 7238 memset(&crtc_state->dpll_hw_state, 0, 7239 sizeof(crtc_state->dpll_hw_state)); 7240 7241 if (!crtc_state->clock_set && 7242 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7243 refclk, NULL, &crtc_state->dpll)) { 7244 
DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7245 return -EINVAL; 7246 } 7247 7248 chv_compute_dpll(crtc, crtc_state); 7249 7250 return 0; 7251 } 7252 7253 static int vlv_crtc_compute_clock(struct intel_crtc *crtc, 7254 struct intel_crtc_state *crtc_state) 7255 { 7256 int refclk = 100000; 7257 const struct intel_limit *limit = &intel_limits_vlv; 7258 7259 memset(&crtc_state->dpll_hw_state, 0, 7260 sizeof(crtc_state->dpll_hw_state)); 7261 7262 if (!crtc_state->clock_set && 7263 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 7264 refclk, NULL, &crtc_state->dpll)) { 7265 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7266 return -EINVAL; 7267 } 7268 7269 vlv_compute_dpll(crtc, crtc_state); 7270 7271 return 0; 7272 } 7273 7274 static void i9xx_get_pfit_config(struct intel_crtc *crtc, 7275 struct intel_crtc_state *pipe_config) 7276 { 7277 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7278 uint32_t tmp; 7279 7280 if (INTEL_GEN(dev_priv) <= 3 && 7281 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv))) 7282 return; 7283 7284 tmp = I915_READ(PFIT_CONTROL); 7285 if (!(tmp & PFIT_ENABLE)) 7286 return; 7287 7288 /* Check whether the pfit is attached to our pipe. */ 7289 if (INTEL_GEN(dev_priv) < 4) { 7290 if (crtc->pipe != PIPE_B) 7291 return; 7292 } else { 7293 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT)) 7294 return; 7295 } 7296 7297 pipe_config->gmch_pfit.control = tmp; 7298 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS); 7299 } 7300 7301 static void vlv_crtc_clock_get(struct intel_crtc *crtc, 7302 struct intel_crtc_state *pipe_config) 7303 { 7304 struct drm_device *dev = crtc->base.dev; 7305 struct drm_i915_private *dev_priv = to_i915(dev); 7306 int pipe = pipe_config->cpu_transcoder; 7307 struct dpll clock; 7308 u32 mdiv; 7309 int refclk = 100000; 7310 7311 /* In case of DSI, DPLL will not be used */ 7312 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7313 return; 7314 7315 mutex_lock(&dev_priv->sb_lock); 7316 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe)); 7317 mutex_unlock(&dev_priv->sb_lock); 7318 7319 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7; 7320 clock.m2 = mdiv & DPIO_M2DIV_MASK; 7321 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf; 7322 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7; 7323 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f; 7324 7325 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock); 7326 } 7327 7328 static void 7329 i9xx_get_initial_plane_config(struct intel_crtc *crtc, 7330 struct intel_initial_plane_config *plane_config) 7331 { 7332 struct drm_device *dev = crtc->base.dev; 7333 struct drm_i915_private *dev_priv = to_i915(dev); 7334 u32 val, base, offset; 7335 int pipe = crtc->pipe, plane = crtc->plane; 7336 int fourcc, pixel_format; 7337 unsigned int aligned_height; 7338 struct drm_framebuffer *fb; 7339 struct intel_framebuffer *intel_fb; 7340 7341 val = I915_READ(DSPCNTR(plane)); 7342 if (!(val & DISPLAY_PLANE_ENABLE)) 7343 return; 7344 7345 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 7346 if (!intel_fb) { 7347 DRM_DEBUG_KMS("failed to alloc fb\n"); 7348 return; 7349 } 7350 7351 fb = &intel_fb->base; 7352 7353 fb->dev = dev; 7354 7355 if (INTEL_GEN(dev_priv) >= 4) { 7356 if (val & DISPPLANE_TILED) { 7357 plane_config->tiling = I915_TILING_X; 7358 fb->modifier = I915_FORMAT_MOD_X_TILED; 7359 } 7360 } 7361 7362 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 7363 fourcc = i9xx_format_to_fourcc(pixel_format); 7364 fb->format = drm_format_info(fourcc); 7365 7366 if (INTEL_GEN(dev_priv) 
>= 4) { 7367 if (plane_config->tiling) 7368 offset = I915_READ(DSPTILEOFF(plane)); 7369 else 7370 offset = I915_READ(DSPLINOFF(plane)); 7371 base = I915_READ(DSPSURF(plane)) & 0xfffff000; 7372 } else { 7373 base = I915_READ(DSPADDR(plane)); 7374 } 7375 plane_config->base = base; 7376 7377 val = I915_READ(PIPESRC(pipe)); 7378 fb->width = ((val >> 16) & 0xfff) + 1; 7379 fb->height = ((val >> 0) & 0xfff) + 1; 7380 7381 val = I915_READ(DSPSTRIDE(pipe)); 7382 fb->pitches[0] = val & 0xffffffc0; 7383 7384 aligned_height = intel_fb_align_height(fb, 0, fb->height); 7385 7386 plane_config->size = fb->pitches[0] * aligned_height; 7387 7388 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 7389 pipe_name(pipe), plane, fb->width, fb->height, 7390 fb->format->cpp[0] * 8, base, fb->pitches[0], 7391 plane_config->size); 7392 7393 plane_config->fb = intel_fb; 7394 } 7395 7396 static void chv_crtc_clock_get(struct intel_crtc *crtc, 7397 struct intel_crtc_state *pipe_config) 7398 { 7399 struct drm_device *dev = crtc->base.dev; 7400 struct drm_i915_private *dev_priv = to_i915(dev); 7401 int pipe = pipe_config->cpu_transcoder; 7402 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7403 struct dpll clock; 7404 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 7405 int refclk = 100000; 7406 7407 /* In case of DSI, DPLL will not be used */ 7408 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7409 return; 7410 7411 mutex_lock(&dev_priv->sb_lock); 7412 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port)); 7413 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port)); 7414 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port)); 7415 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 7416 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7417 mutex_unlock(&dev_priv->sb_lock); 7418 7419 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 
2 : 0; 7420 clock.m2 = (pll_dw0 & 0xff) << 22; 7421 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 7422 clock.m2 |= pll_dw2 & 0x3fffff; 7423 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 7424 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 7425 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 7426 7427 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 7428 } 7429 7430 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 7431 struct intel_crtc_state *pipe_config) 7432 { 7433 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 7434 enum intel_display_power_domain power_domain; 7435 uint32_t tmp; 7436 bool ret; 7437 7438 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 7439 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 7440 return false; 7441 7442 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7443 pipe_config->shared_dpll = NULL; 7444 7445 ret = false; 7446 7447 tmp = I915_READ(PIPECONF(crtc->pipe)); 7448 if (!(tmp & PIPECONF_ENABLE)) 7449 goto out; 7450 7451 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 7452 IS_CHERRYVIEW(dev_priv)) { 7453 switch (tmp & PIPECONF_BPC_MASK) { 7454 case PIPECONF_6BPC: 7455 pipe_config->pipe_bpp = 18; 7456 break; 7457 case PIPECONF_8BPC: 7458 pipe_config->pipe_bpp = 24; 7459 break; 7460 case PIPECONF_10BPC: 7461 pipe_config->pipe_bpp = 30; 7462 break; 7463 default: 7464 break; 7465 } 7466 } 7467 7468 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 7469 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 7470 pipe_config->limited_color_range = true; 7471 7472 if (INTEL_GEN(dev_priv) < 4) 7473 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 7474 7475 intel_get_pipe_timings(crtc, pipe_config); 7476 intel_get_pipe_src_size(crtc, pipe_config); 7477 7478 i9xx_get_pfit_config(crtc, pipe_config); 7479 7480 if (INTEL_GEN(dev_priv) >= 4) { 7481 /* No way to read it out on pipes B and C */ 7482 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) 7483 tmp = dev_priv->chv_dpll_md[crtc->pipe]; 7484 else 7485 tmp = I915_READ(DPLL_MD(crtc->pipe)); 7486 pipe_config->pixel_multiplier = 7487 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) 7488 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; 7489 pipe_config->dpll_hw_state.dpll_md = tmp; 7490 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 7491 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { 7492 tmp = I915_READ(DPLL(crtc->pipe)); 7493 pipe_config->pixel_multiplier = 7494 ((tmp & SDVO_MULTIPLIER_MASK) 7495 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; 7496 } else { 7497 /* Note that on i915G/GM the pixel multiplier is in the sdvo 7498 * port and will be fixed up in the encoder->get_config 7499 * function. */ 7500 pipe_config->pixel_multiplier = 1; 7501 } 7502 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe)); 7503 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 7504 /* 7505 * DPLL_DVO_2X_MODE must be enabled for both DPLLs 7506 * on 830. Filter it out here so that we don't 7507 * report errors due to that. 7508 */ 7509 if (IS_I830(dev_priv)) 7510 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE; 7511 7512 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe)); 7513 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe)); 7514 } else { 7515 /* Mask out read-only status bits. 
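(DPLL_LOCK_VLV and the port B/C ready flags below reflect hardware status rather than configuration; left in place they would show up as spurious mismatches when this read-back state is later compared against the state we computed.)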
*/ 7516 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV | 7517 DPLL_PORTC_READY_MASK | 7518 DPLL_PORTB_READY_MASK); 7519 } 7520 7521 if (IS_CHERRYVIEW(dev_priv)) 7522 chv_crtc_clock_get(crtc, pipe_config); 7523 else if (IS_VALLEYVIEW(dev_priv)) 7524 vlv_crtc_clock_get(crtc, pipe_config); 7525 else 7526 i9xx_crtc_clock_get(crtc, pipe_config); 7527 7528 /* 7529 * Normally the dotclock is filled in by the encoder .get_config() 7530 * but in case the pipe is enabled w/o any ports we need a sane 7531 * default. 7532 */ 7533 pipe_config->base.adjusted_mode.crtc_clock = 7534 pipe_config->port_clock / pipe_config->pixel_multiplier; 7535 7536 ret = true; 7537 7538 out: 7539 intel_display_power_put(dev_priv, power_domain); 7540 7541 return ret; 7542 } 7543 7544 static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv) 7545 { 7546 struct intel_encoder *encoder; 7547 int i; 7548 u32 val, final; 7549 bool has_lvds = false; 7550 bool has_cpu_edp = false; 7551 bool has_panel = false; 7552 bool has_ck505 = false; 7553 bool can_ssc = false; 7554 bool using_ssc_source = false; 7555 7556 /* We need to take the global config into account */ 7557 for_each_intel_encoder(&dev_priv->drm, encoder) { 7558 switch (encoder->type) { 7559 case INTEL_OUTPUT_LVDS: 7560 has_panel = true; 7561 has_lvds = true; 7562 break; 7563 case INTEL_OUTPUT_EDP: 7564 has_panel = true; 7565 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 7566 has_cpu_edp = true; 7567 break; 7568 default: 7569 break; 7570 } 7571 } 7572 7573 if (HAS_PCH_IBX(dev_priv)) { 7574 has_ck505 = dev_priv->vbt.display_clock_mode; 7575 can_ssc = has_ck505; 7576 } else { 7577 has_ck505 = false; 7578 can_ssc = true; 7579 } 7580 7581 /* Check if any DPLLs are using the SSC source */ 7582 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 7583 u32 temp = I915_READ(PCH_DPLL(i)); 7584 7585 if (!(temp & DPLL_VCO_ENABLE)) 7586 continue; 7587 7588 if ((temp & PLL_REF_INPUT_MASK) == 7589 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 7590 using_ssc_source = true; 7591 break; 7592 } 7593 } 7594 7595 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n", 7596 has_panel, has_lvds, has_ck505, using_ssc_source); 7597 7598 /* Ironlake: try to setup display ref clock before DPLL 7599 * enabling. This is only under driver's control after 7600 * PCH B stepping, previous chipset stepping should be 7601 * ignoring this setting. 7602 */ 7603 val = I915_READ(PCH_DREF_CONTROL); 7604 7605 /* As we must carefully and slowly disable/enable each source in turn, 7606 * compute the final state we want first and check if we need to 7607 * make any changes at all. 
7608 */ 7609 final = val; 7610 final &= ~DREF_NONSPREAD_SOURCE_MASK; 7611 if (has_ck505) 7612 final |= DREF_NONSPREAD_CK505_ENABLE; 7613 else 7614 final |= DREF_NONSPREAD_SOURCE_ENABLE; 7615 7616 final &= ~DREF_SSC_SOURCE_MASK; 7617 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7618 final &= ~DREF_SSC1_ENABLE; 7619 7620 if (has_panel) { 7621 final |= DREF_SSC_SOURCE_ENABLE; 7622 7623 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7624 final |= DREF_SSC1_ENABLE; 7625 7626 if (has_cpu_edp) { 7627 if (intel_panel_use_ssc(dev_priv) && can_ssc) 7628 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7629 else 7630 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7631 } else 7632 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7633 } else if (using_ssc_source) { 7634 final |= DREF_SSC_SOURCE_ENABLE; 7635 final |= DREF_SSC1_ENABLE; 7636 } 7637 7638 if (final == val) 7639 return; 7640 7641 /* Always enable nonspread source */ 7642 val &= ~DREF_NONSPREAD_SOURCE_MASK; 7643 7644 if (has_ck505) 7645 val |= DREF_NONSPREAD_CK505_ENABLE; 7646 else 7647 val |= DREF_NONSPREAD_SOURCE_ENABLE; 7648 7649 if (has_panel) { 7650 val &= ~DREF_SSC_SOURCE_MASK; 7651 val |= DREF_SSC_SOURCE_ENABLE; 7652 7653 /* SSC must be turned on before enabling the CPU output */ 7654 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7655 DRM_DEBUG_KMS("Using SSC on panel\n"); 7656 val |= DREF_SSC1_ENABLE; 7657 } else 7658 val &= ~DREF_SSC1_ENABLE; 7659 7660 /* Get SSC going before enabling the outputs */ 7661 I915_WRITE(PCH_DREF_CONTROL, val); 7662 POSTING_READ(PCH_DREF_CONTROL); 7663 udelay(200); 7664 7665 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7666 7667 /* Enable CPU source on CPU attached eDP */ 7668 if (has_cpu_edp) { 7669 if (intel_panel_use_ssc(dev_priv) && can_ssc) { 7670 DRM_DEBUG_KMS("Using SSC on eDP\n"); 7671 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; 7672 } else 7673 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; 7674 } else 7675 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7676 7677 I915_WRITE(PCH_DREF_CONTROL, val); 7678 POSTING_READ(PCH_DREF_CONTROL); 7679 udelay(200); 7680 } else { 7681 DRM_DEBUG_KMS("Disabling CPU source output\n"); 7682 7683 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK; 7684 7685 /* Turn off CPU output */ 7686 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE; 7687 7688 I915_WRITE(PCH_DREF_CONTROL, val); 7689 POSTING_READ(PCH_DREF_CONTROL); 7690 udelay(200); 7691 7692 if (!using_ssc_source) { 7693 DRM_DEBUG_KMS("Disabling SSC source\n"); 7694 7695 /* Turn off the SSC source */ 7696 val &= ~DREF_SSC_SOURCE_MASK; 7697 val |= DREF_SSC_SOURCE_DISABLE; 7698 7699 /* Turn off SSC1 */ 7700 val &= ~DREF_SSC1_ENABLE; 7701 7702 I915_WRITE(PCH_DREF_CONTROL, val); 7703 POSTING_READ(PCH_DREF_CONTROL); 7704 udelay(200); 7705 } 7706 } 7707 7708 BUG_ON(val != final); 7709 } 7710 7711 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv) 7712 { 7713 uint32_t tmp; 7714 7715 tmp = I915_READ(SOUTH_CHICKEN2); 7716 tmp |= FDI_MPHY_IOSFSB_RESET_CTL; 7717 I915_WRITE(SOUTH_CHICKEN2, tmp); 7718 7719 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) & 7720 FDI_MPHY_IOSFSB_RESET_STATUS, 100)) 7721 DRM_ERROR("FDI mPHY reset assert timeout\n"); 7722 7723 tmp = I915_READ(SOUTH_CHICKEN2); 7724 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL; 7725 I915_WRITE(SOUTH_CHICKEN2, tmp); 7726 7727 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) & 7728 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) 7729 DRM_ERROR("FDI mPHY reset de-assert timeout\n"); 7730 } 7731 7732 /* WaMPhyProgramming:hsw */ 7733 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv) 7734 { 7735 uint32_t tmp; 7736 7737 tmp = 
intel_sbi_read(dev_priv, 0x8008, SBI_MPHY); 7738 tmp &= ~(0xFF << 24); 7739 tmp |= (0x12 << 24); 7740 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY); 7741 7742 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY); 7743 tmp |= (1 << 11); 7744 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY); 7745 7746 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY); 7747 tmp |= (1 << 11); 7748 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY); 7749 7750 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY); 7751 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7752 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 7753 7754 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 7755 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 7756 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 7757 7758 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 7759 tmp &= ~(7 << 13); 7760 tmp |= (5 << 13); 7761 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 7762 7763 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 7764 tmp &= ~(7 << 13); 7765 tmp |= (5 << 13); 7766 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 7767 7768 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 7769 tmp &= ~0xFF; 7770 tmp |= 0x1C; 7771 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 7772 7773 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 7774 tmp &= ~0xFF; 7775 tmp |= 0x1C; 7776 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 7777 7778 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 7779 tmp &= ~(0xFF << 16); 7780 tmp |= (0x1C << 16); 7781 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 7782 7783 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 7784 tmp &= ~(0xFF << 16); 7785 tmp |= (0x1C << 16); 7786 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 7787 7788 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 7789 tmp |= (1 << 27); 7790 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 7791 7792 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 7793 tmp |= (1 << 27); 7794 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 7795 7796 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 7797 tmp &= ~(0xF << 28); 7798 tmp |= (4 << 28); 7799 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 7800 7801 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 7802 tmp &= ~(0xF << 28); 7803 tmp |= (4 << 28); 7804 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 7805 } 7806 7807 /* Implements 3 different sequences from BSpec chapter "Display iCLK 7808 * Programming" based on the parameters passed: 7809 * - Sequence to enable CLKOUT_DP 7810 * - Sequence to enable CLKOUT_DP without spread 7811 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 7812 */ 7813 static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv, 7814 bool with_spread, bool with_fdi) 7815 { 7816 uint32_t reg, tmp; 7817 7818 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 7819 with_spread = true; 7820 if (WARN(HAS_PCH_LPT_LP(dev_priv) && 7821 with_fdi, "LP PCH doesn't have FDI\n")) 7822 with_fdi = false; 7823 7824 mutex_lock(&dev_priv->sb_lock); 7825 7826 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7827 tmp &= ~SBI_SSCCTL_DISABLE; 7828 tmp |= SBI_SSCCTL_PATHALT; 7829 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7830 7831 udelay(24); 7832 7833 if (with_spread) { 7834 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7835 tmp &= ~SBI_SSCCTL_PATHALT; 7836 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7837 7838 if (with_fdi) { 7839 lpt_reset_fdi_mphy(dev_priv); 7840 lpt_program_fdi_mphy(dev_priv); 7841 } 7842 } 7843 7844 reg = 
HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7845 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7846 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7847 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7848 7849 mutex_unlock(&dev_priv->sb_lock); 7850 } 7851 7852 /* Sequence to disable CLKOUT_DP */ 7853 static void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv) 7854 { 7855 uint32_t reg, tmp; 7856 7857 mutex_lock(&dev_priv->sb_lock); 7858 7859 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0; 7860 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7861 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7862 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7863 7864 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7865 if (!(tmp & SBI_SSCCTL_DISABLE)) { 7866 if (!(tmp & SBI_SSCCTL_PATHALT)) { 7867 tmp |= SBI_SSCCTL_PATHALT; 7868 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7869 udelay(32); 7870 } 7871 tmp |= SBI_SSCCTL_DISABLE; 7872 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7873 } 7874 7875 mutex_unlock(&dev_priv->sb_lock); 7876 } 7877 7878 #define BEND_IDX(steps) ((50 + (steps)) / 5) 7879 7880 static const uint16_t sscdivintphase[] = { 7881 [BEND_IDX( 50)] = 0x3B23, 7882 [BEND_IDX( 45)] = 0x3B23, 7883 [BEND_IDX( 40)] = 0x3C23, 7884 [BEND_IDX( 35)] = 0x3C23, 7885 [BEND_IDX( 30)] = 0x3D23, 7886 [BEND_IDX( 25)] = 0x3D23, 7887 [BEND_IDX( 20)] = 0x3E23, 7888 [BEND_IDX( 15)] = 0x3E23, 7889 [BEND_IDX( 10)] = 0x3F23, 7890 [BEND_IDX( 5)] = 0x3F23, 7891 [BEND_IDX( 0)] = 0x0025, 7892 [BEND_IDX( -5)] = 0x0025, 7893 [BEND_IDX(-10)] = 0x0125, 7894 [BEND_IDX(-15)] = 0x0125, 7895 [BEND_IDX(-20)] = 0x0225, 7896 [BEND_IDX(-25)] = 0x0225, 7897 [BEND_IDX(-30)] = 0x0325, 7898 [BEND_IDX(-35)] = 0x0325, 7899 [BEND_IDX(-40)] = 0x0425, 7900 [BEND_IDX(-45)] = 0x0425, 7901 [BEND_IDX(-50)] = 0x0525, 7902 }; 7903 7904 /* 7905 * Bend CLKOUT_DP 7906 * steps -50 to 50 inclusive, in steps of 5 7907 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz) 7908 * change in clock period = -(steps / 10) * 5.787 ps 7909 */ 7910 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps) 7911 { 7912 uint32_t tmp; 7913 int idx = BEND_IDX(steps); 7914 7915 if (WARN_ON(steps % 5 != 0)) 7916 return; 7917 7918 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase))) 7919 return; 7920 7921 mutex_lock(&dev_priv->sb_lock); 7922 7923 if (steps % 10 != 0) 7924 tmp = 0xAAAAAAAB; 7925 else 7926 tmp = 0x00000000; 7927 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK); 7928 7929 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK); 7930 tmp &= 0xffff0000; 7931 tmp |= sscdivintphase[idx]; 7932 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK); 7933 7934 mutex_unlock(&dev_priv->sb_lock); 7935 } 7936 7937 #undef BEND_IDX 7938 7939 static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv) 7940 { 7941 struct intel_encoder *encoder; 7942 bool has_vga = false; 7943 7944 for_each_intel_encoder(&dev_priv->drm, encoder) { 7945 switch (encoder->type) { 7946 case INTEL_OUTPUT_ANALOG: 7947 has_vga = true; 7948 break; 7949 default: 7950 break; 7951 } 7952 } 7953 7954 if (has_vga) { 7955 lpt_bend_clkout_dp(dev_priv, 0); 7956 lpt_enable_clkout_dp(dev_priv, true, true); 7957 } else { 7958 lpt_disable_clkout_dp(dev_priv); 7959 } 7960 } 7961 7962 /* 7963 * Initialize reference clocks when the driver loads 7964 */ 7965 void intel_init_pch_refclk(struct drm_i915_private *dev_priv) 7966 { 7967 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 7968 
ironlake_init_pch_refclk(dev_priv); 7969 else if (HAS_PCH_LPT(dev_priv)) 7970 lpt_init_pch_refclk(dev_priv); 7971 } 7972 7973 static void ironlake_set_pipeconf(struct drm_crtc *crtc) 7974 { 7975 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 7976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7977 int pipe = intel_crtc->pipe; 7978 uint32_t val; 7979 7980 val = 0; 7981 7982 switch (intel_crtc->config->pipe_bpp) { 7983 case 18: 7984 val |= PIPECONF_6BPC; 7985 break; 7986 case 24: 7987 val |= PIPECONF_8BPC; 7988 break; 7989 case 30: 7990 val |= PIPECONF_10BPC; 7991 break; 7992 case 36: 7993 val |= PIPECONF_12BPC; 7994 break; 7995 default: 7996 /* Case prevented by intel_choose_pipe_bpp_dither. */ 7997 BUG(); 7998 } 7999 8000 if (intel_crtc->config->dither) 8001 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8002 8003 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8004 val |= PIPECONF_INTERLACED_ILK; 8005 else 8006 val |= PIPECONF_PROGRESSIVE; 8007 8008 if (intel_crtc->config->limited_color_range) 8009 val |= PIPECONF_COLOR_RANGE_SELECT; 8010 8011 I915_WRITE(PIPECONF(pipe), val); 8012 POSTING_READ(PIPECONF(pipe)); 8013 } 8014 8015 static void haswell_set_pipeconf(struct drm_crtc *crtc) 8016 { 8017 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8018 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8019 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8020 u32 val = 0; 8021 8022 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither) 8023 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); 8024 8025 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) 8026 val |= PIPECONF_INTERLACED_ILK; 8027 else 8028 val |= PIPECONF_PROGRESSIVE; 8029 8030 I915_WRITE(PIPECONF(cpu_transcoder), val); 8031 POSTING_READ(PIPECONF(cpu_transcoder)); 8032 } 8033 8034 static void haswell_set_pipemisc(struct drm_crtc *crtc) 8035 { 8036 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 8037 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8038 8039 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8040 u32 val = 0; 8041 8042 switch (intel_crtc->config->pipe_bpp) { 8043 case 18: 8044 val |= PIPEMISC_DITHER_6_BPC; 8045 break; 8046 case 24: 8047 val |= PIPEMISC_DITHER_8_BPC; 8048 break; 8049 case 30: 8050 val |= PIPEMISC_DITHER_10_BPC; 8051 break; 8052 case 36: 8053 val |= PIPEMISC_DITHER_12_BPC; 8054 break; 8055 default: 8056 /* Case prevented by pipe_config_set_bpp. */ 8057 BUG(); 8058 } 8059 8060 if (intel_crtc->config->dither) 8061 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP; 8062 8063 I915_WRITE(PIPEMISC(intel_crtc->pipe), val); 8064 } 8065 } 8066 8067 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp) 8068 { 8069 /* 8070 * Account for spread spectrum to avoid 8071 * oversubscribing the link. Max center spread 8072 * is 2.5%; use 5% for safety's sake. 
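For example (illustrative numbers only): a 148500 kHz pixel clock at 24 bpp gives bps = 148500 * 24 * 21 / 20 = 3742200, and on a 270000 kHz FDI link that is DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.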
8073 */ 8074 u32 bps = target_clock * bpp * 21 / 20; 8075 return DIV_ROUND_UP(bps, link_bw * 8); 8076 } 8077 8078 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor) 8079 { 8080 return i9xx_dpll_compute_m(dpll) < factor * dpll->n; 8081 } 8082 8083 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8084 struct intel_crtc_state *crtc_state, 8085 struct dpll *reduced_clock) 8086 { 8087 struct drm_crtc *crtc = &intel_crtc->base; 8088 struct drm_device *dev = crtc->dev; 8089 struct drm_i915_private *dev_priv = to_i915(dev); 8090 u32 dpll, fp, fp2; 8091 int factor; 8092 8093 /* Enable autotuning of the PLL clock (if permissible) */ 8094 factor = 21; 8095 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8096 if ((intel_panel_use_ssc(dev_priv) && 8097 dev_priv->vbt.lvds_ssc_freq == 100000) || 8098 (HAS_PCH_IBX(dev_priv) && intel_is_dual_link_lvds(dev))) 8099 factor = 25; 8100 } else if (crtc_state->sdvo_tv_clock) 8101 factor = 20; 8102 8103 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 8104 8105 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor)) 8106 fp |= FP_CB_TUNE; 8107 8108 if (reduced_clock) { 8109 fp2 = i9xx_dpll_compute_fp(reduced_clock); 8110 8111 if (reduced_clock->m < factor * reduced_clock->n) 8112 fp2 |= FP_CB_TUNE; 8113 } else { 8114 fp2 = fp; 8115 } 8116 8117 dpll = 0; 8118 8119 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) 8120 dpll |= DPLLB_MODE_LVDS; 8121 else 8122 dpll |= DPLLB_MODE_DAC_SERIAL; 8123 8124 dpll |= (crtc_state->pixel_multiplier - 1) 8125 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8126 8127 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) || 8128 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) 8129 dpll |= DPLL_SDVO_HIGH_SPEED; 8130 8131 if (intel_crtc_has_dp_encoder(crtc_state)) 8132 dpll |= DPLL_SDVO_HIGH_SPEED; 8133 8134 /* 8135 * The high speed IO clock is only really required for 8136 * SDVO/HDMI/DP, but we also enable it for CRT to make it 8137 * possible to share the DPLL between CRT and HDMI. Enabling 8138 * the clock needlessly does no real harm, except use up a 8139 * bit of power potentially. 8140 * 8141 * We'll limit this to IVB with 3 pipes, since it has only two 8142 * DPLLs and so DPLL sharing is the only way to get three pipes 8143 * driving PCH ports at the same time. On SNB we could do this, 8144 * and potentially avoid enabling the second DPLL, but it's not 8145 * clear if it's a win or loss power wise. No point in doing 8146 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
8147 */ 8148 if (INTEL_INFO(dev_priv)->num_pipes == 3 && 8149 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) 8150 dpll |= DPLL_SDVO_HIGH_SPEED; 8151 8152 /* compute bitmask from p1 value */ 8153 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 8154 /* also FPA1 */ 8155 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 8156 8157 switch (crtc_state->dpll.p2) { 8158 case 5: 8159 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 8160 break; 8161 case 7: 8162 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; 8163 break; 8164 case 10: 8165 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; 8166 break; 8167 case 14: 8168 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 8169 break; 8170 } 8171 8172 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8173 intel_panel_use_ssc(dev_priv)) 8174 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8175 else 8176 dpll |= PLL_REF_INPUT_DREFCLK; 8177 8178 dpll |= DPLL_VCO_ENABLE; 8179 8180 crtc_state->dpll_hw_state.dpll = dpll; 8181 crtc_state->dpll_hw_state.fp0 = fp; 8182 crtc_state->dpll_hw_state.fp1 = fp2; 8183 } 8184 8185 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, 8186 struct intel_crtc_state *crtc_state) 8187 { 8188 struct drm_device *dev = crtc->base.dev; 8189 struct drm_i915_private *dev_priv = to_i915(dev); 8190 struct dpll reduced_clock; 8191 bool has_reduced_clock = false; 8192 struct intel_shared_dpll *pll; 8193 const struct intel_limit *limit; 8194 int refclk = 120000; 8195 8196 memset(&crtc_state->dpll_hw_state, 0, 8197 sizeof(crtc_state->dpll_hw_state)); 8198 8199 crtc->lowfreq_avail = false; 8200 8201 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 8202 if (!crtc_state->has_pch_encoder) 8203 return 0; 8204 8205 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8206 if (intel_panel_use_ssc(dev_priv)) { 8207 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 8208 dev_priv->vbt.lvds_ssc_freq); 8209 refclk = dev_priv->vbt.lvds_ssc_freq; 8210 } 8211 8212 if (intel_is_dual_link_lvds(dev)) { 8213 if (refclk == 100000) 8214 limit = &intel_limits_ironlake_dual_lvds_100m; 8215 else 8216 limit = &intel_limits_ironlake_dual_lvds; 8217 } else { 8218 if (refclk == 100000) 8219 limit = &intel_limits_ironlake_single_lvds_100m; 8220 else 8221 limit = &intel_limits_ironlake_single_lvds; 8222 } 8223 } else { 8224 limit = &intel_limits_ironlake_dac; 8225 } 8226 8227 if (!crtc_state->clock_set && 8228 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock, 8229 refclk, NULL, &crtc_state->dpll)) { 8230 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 8231 return -EINVAL; 8232 } 8233 8234 ironlake_compute_dpll(crtc, crtc_state, 8235 has_reduced_clock ? 
&reduced_clock : NULL); 8236 8237 pll = intel_get_shared_dpll(crtc, crtc_state, NULL); 8238 if (pll == NULL) { 8239 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8240 pipe_name(crtc->pipe)); 8241 return -EINVAL; 8242 } 8243 8244 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 8245 has_reduced_clock) 8246 crtc->lowfreq_avail = true; 8247 8248 return 0; 8249 } 8250 8251 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, 8252 struct intel_link_m_n *m_n) 8253 { 8254 struct drm_device *dev = crtc->base.dev; 8255 struct drm_i915_private *dev_priv = to_i915(dev); 8256 enum i915_pipe pipe = crtc->pipe; 8257 8258 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 8259 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe)); 8260 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe)) 8261 & ~TU_SIZE_MASK; 8262 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe)); 8263 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe)) 8264 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8265 } 8266 8267 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, 8268 enum transcoder transcoder, 8269 struct intel_link_m_n *m_n, 8270 struct intel_link_m_n *m2_n2) 8271 { 8272 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 8273 enum i915_pipe pipe = crtc->pipe; 8274 8275 if (INTEL_GEN(dev_priv) >= 5) { 8276 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder)); 8277 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder)); 8278 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder)) 8279 & ~TU_SIZE_MASK; 8280 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder)); 8281 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder)) 8282 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8283 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for 8284 * gen < 8) and if DRRS is supported (to make sure the 8285 * registers are not unnecessarily read). 
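M2_N2 hold the alternate link M/N values that DRRS switches to for the downclocked refresh rate, so they only carry meaningful state when DRRS is in use.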
8286 */ 8287 if (m2_n2 && INTEL_GEN(dev_priv) < 8 && 8288 crtc->config->has_drrs) { 8289 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 8290 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 8291 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 8292 & ~TU_SIZE_MASK; 8293 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 8294 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 8295 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8296 } 8297 } else { 8298 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 8299 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 8300 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 8301 & ~TU_SIZE_MASK; 8302 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 8303 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 8304 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 8305 } 8306 } 8307 8308 void intel_dp_get_m_n(struct intel_crtc *crtc, 8309 struct intel_crtc_state *pipe_config) 8310 { 8311 if (pipe_config->has_pch_encoder) 8312 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 8313 else 8314 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8315 &pipe_config->dp_m_n, 8316 &pipe_config->dp_m2_n2); 8317 } 8318 8319 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 8320 struct intel_crtc_state *pipe_config) 8321 { 8322 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 8323 &pipe_config->fdi_m_n, NULL); 8324 } 8325 8326 static void skylake_get_pfit_config(struct intel_crtc *crtc, 8327 struct intel_crtc_state *pipe_config) 8328 { 8329 struct drm_device *dev = crtc->base.dev; 8330 struct drm_i915_private *dev_priv = to_i915(dev); 8331 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 8332 uint32_t ps_ctrl = 0; 8333 int id = -1; 8334 int i; 8335 8336 /* find scaler attached to this pipe */ 8337 for (i = 0; i < crtc->num_scalers; i++) { 8338 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 8339 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 8340 id = i; 8341 pipe_config->pch_pfit.enabled = true; 8342 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i)); 8343 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i)); 8344 break; 8345 } 8346 } 8347 8348 scaler_state->scaler_id = id; 8349 if (id >= 0) { 8350 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX); 8351 } else { 8352 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); 8353 } 8354 } 8355 8356 static void 8357 skylake_get_initial_plane_config(struct intel_crtc *crtc, 8358 struct intel_initial_plane_config *plane_config) 8359 { 8360 struct drm_device *dev = crtc->base.dev; 8361 struct drm_i915_private *dev_priv = to_i915(dev); 8362 u32 val, base, offset, stride_mult, tiling; 8363 int pipe = crtc->pipe; 8364 int fourcc, pixel_format; 8365 unsigned int aligned_height; 8366 struct drm_framebuffer *fb; 8367 struct intel_framebuffer *intel_fb; 8368 8369 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8370 if (!intel_fb) { 8371 DRM_DEBUG_KMS("failed to alloc fb\n"); 8372 return; 8373 } 8374 8375 fb = &intel_fb->base; 8376 8377 fb->dev = dev; 8378 8379 val = I915_READ(PLANE_CTL(pipe, 0)); 8380 if (!(val & PLANE_CTL_ENABLE)) 8381 goto error; 8382 8383 pixel_format = val & PLANE_CTL_FORMAT_MASK; 8384 fourcc = skl_format_to_fourcc(pixel_format, 8385 val & PLANE_CTL_ORDER_RGBX, 8386 val & PLANE_CTL_ALPHA_MASK); 8387 fb->format = drm_format_info(fourcc); 8388 8389 tiling = val & PLANE_CTL_TILED_MASK; 8390 switch (tiling) { 8391 case PLANE_CTL_TILED_LINEAR: 8392 fb->modifier = DRM_FORMAT_MOD_LINEAR; 8393 break; 8394 case 
PLANE_CTL_TILED_X: 8395 plane_config->tiling = I915_TILING_X; 8396 fb->modifier = I915_FORMAT_MOD_X_TILED; 8397 break; 8398 case PLANE_CTL_TILED_Y: 8399 fb->modifier = I915_FORMAT_MOD_Y_TILED; 8400 break; 8401 case PLANE_CTL_TILED_YF: 8402 fb->modifier = I915_FORMAT_MOD_Yf_TILED; 8403 break; 8404 default: 8405 MISSING_CASE(tiling); 8406 goto error; 8407 } 8408 8409 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000; 8410 plane_config->base = base; 8411 8412 offset = I915_READ(PLANE_OFFSET(pipe, 0)); 8413 8414 val = I915_READ(PLANE_SIZE(pipe, 0)); 8415 fb->height = ((val >> 16) & 0xfff) + 1; 8416 fb->width = ((val >> 0) & 0x1fff) + 1; 8417 8418 val = I915_READ(PLANE_STRIDE(pipe, 0)); 8419 stride_mult = intel_fb_stride_alignment(fb, 0); 8420 fb->pitches[0] = (val & 0x3ff) * stride_mult; 8421 8422 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8423 8424 plane_config->size = fb->pitches[0] * aligned_height; 8425 8426 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8427 pipe_name(pipe), fb->width, fb->height, 8428 fb->format->cpp[0] * 8, base, fb->pitches[0], 8429 plane_config->size); 8430 8431 plane_config->fb = intel_fb; 8432 return; 8433 8434 error: 8435 kfree(intel_fb); 8436 } 8437 8438 static void ironlake_get_pfit_config(struct intel_crtc *crtc, 8439 struct intel_crtc_state *pipe_config) 8440 { 8441 struct drm_device *dev = crtc->base.dev; 8442 struct drm_i915_private *dev_priv = to_i915(dev); 8443 uint32_t tmp; 8444 8445 tmp = I915_READ(PF_CTL(crtc->pipe)); 8446 8447 if (tmp & PF_ENABLE) { 8448 pipe_config->pch_pfit.enabled = true; 8449 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe)); 8450 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe)); 8451 8452 /* We currently do not free assignments of panel fitters on 8453 * ivb/hsw (since we don't use the higher upscaling modes which 8454 * differentiate them) so just WARN about this case for now.
*/ 8455 if (IS_GEN7(dev_priv)) { 8456 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) != 8457 PF_PIPE_SEL_IVB(crtc->pipe)); 8458 } 8459 } 8460 } 8461 8462 static void 8463 ironlake_get_initial_plane_config(struct intel_crtc *crtc, 8464 struct intel_initial_plane_config *plane_config) 8465 { 8466 struct drm_device *dev = crtc->base.dev; 8467 struct drm_i915_private *dev_priv = to_i915(dev); 8468 u32 val, base, offset; 8469 int pipe = crtc->pipe; 8470 int fourcc, pixel_format; 8471 unsigned int aligned_height; 8472 struct drm_framebuffer *fb; 8473 struct intel_framebuffer *intel_fb; 8474 8475 val = I915_READ(DSPCNTR(pipe)); 8476 if (!(val & DISPLAY_PLANE_ENABLE)) 8477 return; 8478 8479 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8480 if (!intel_fb) { 8481 DRM_DEBUG_KMS("failed to alloc fb\n"); 8482 return; 8483 } 8484 8485 fb = &intel_fb->base; 8486 8487 fb->dev = dev; 8488 8489 if (INTEL_GEN(dev_priv) >= 4) { 8490 if (val & DISPPLANE_TILED) { 8491 plane_config->tiling = I915_TILING_X; 8492 fb->modifier = I915_FORMAT_MOD_X_TILED; 8493 } 8494 } 8495 8496 pixel_format = val & DISPPLANE_PIXFORMAT_MASK; 8497 fourcc = i9xx_format_to_fourcc(pixel_format); 8498 fb->format = drm_format_info(fourcc); 8499 8500 base = I915_READ(DSPSURF(pipe)) & 0xfffff000; 8501 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 8502 offset = I915_READ(DSPOFFSET(pipe)); 8503 } else { 8504 if (plane_config->tiling) 8505 offset = I915_READ(DSPTILEOFF(pipe)); 8506 else 8507 offset = I915_READ(DSPLINOFF(pipe)); 8508 } 8509 plane_config->base = base; 8510 8511 val = I915_READ(PIPESRC(pipe)); 8512 fb->width = ((val >> 16) & 0xfff) + 1; 8513 fb->height = ((val >> 0) & 0xfff) + 1; 8514 8515 val = I915_READ(DSPSTRIDE(pipe)); 8516 fb->pitches[0] = val & 0xffffffc0; 8517 8518 aligned_height = intel_fb_align_height(fb, 0, fb->height); 8519 8520 plane_config->size = fb->pitches[0] * aligned_height; 8521 8522 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", 8523 pipe_name(pipe), fb->width, fb->height, 8524 fb->format->cpp[0] * 8, base, fb->pitches[0], 8525 plane_config->size); 8526 8527 plane_config->fb = intel_fb; 8528 } 8529 8530 static bool ironlake_get_pipe_config(struct intel_crtc *crtc, 8531 struct intel_crtc_state *pipe_config) 8532 { 8533 struct drm_device *dev = crtc->base.dev; 8534 struct drm_i915_private *dev_priv = to_i915(dev); 8535 enum intel_display_power_domain power_domain; 8536 uint32_t tmp; 8537 bool ret; 8538 8539 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8540 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8541 return false; 8542 8543 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8544 pipe_config->shared_dpll = NULL; 8545 8546 ret = false; 8547 tmp = I915_READ(PIPECONF(crtc->pipe)); 8548 if (!(tmp & PIPECONF_ENABLE)) 8549 goto out; 8550 8551 switch (tmp & PIPECONF_BPC_MASK) { 8552 case PIPECONF_6BPC: 8553 pipe_config->pipe_bpp = 18; 8554 break; 8555 case PIPECONF_8BPC: 8556 pipe_config->pipe_bpp = 24; 8557 break; 8558 case PIPECONF_10BPC: 8559 pipe_config->pipe_bpp = 30; 8560 break; 8561 case PIPECONF_12BPC: 8562 pipe_config->pipe_bpp = 36; 8563 break; 8564 default: 8565 break; 8566 } 8567 8568 if (tmp & PIPECONF_COLOR_RANGE_SELECT) 8569 pipe_config->limited_color_range = true; 8570 8571 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) { 8572 struct intel_shared_dpll *pll; 8573 enum intel_dpll_id pll_id; 8574 8575 pipe_config->has_pch_encoder = true; 8576 8577 tmp = I915_READ(FDI_RX_CTL(crtc->pipe)); 8578 pipe_config->fdi_lanes = 
((FDI_DP_PORT_WIDTH_MASK & tmp) >> 8579 FDI_DP_PORT_WIDTH_SHIFT) + 1; 8580 8581 ironlake_get_fdi_m_n_config(crtc, pipe_config); 8582 8583 if (HAS_PCH_IBX(dev_priv)) { 8584 /* 8585 * The pipe->pch transcoder and pch transcoder->pll 8586 * mapping is fixed. 8587 */ 8588 pll_id = (enum intel_dpll_id) crtc->pipe; 8589 } else { 8590 tmp = I915_READ(PCH_DPLL_SEL); 8591 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 8592 pll_id = DPLL_ID_PCH_PLL_B; 8593 else 8594 pll_id= DPLL_ID_PCH_PLL_A; 8595 } 8596 8597 pipe_config->shared_dpll = 8598 intel_get_shared_dpll_by_id(dev_priv, pll_id); 8599 pll = pipe_config->shared_dpll; 8600 8601 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 8602 &pipe_config->dpll_hw_state)); 8603 8604 tmp = pipe_config->dpll_hw_state.dpll; 8605 pipe_config->pixel_multiplier = 8606 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 8607 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 8608 8609 ironlake_pch_clock_get(crtc, pipe_config); 8610 } else { 8611 pipe_config->pixel_multiplier = 1; 8612 } 8613 8614 intel_get_pipe_timings(crtc, pipe_config); 8615 intel_get_pipe_src_size(crtc, pipe_config); 8616 8617 ironlake_get_pfit_config(crtc, pipe_config); 8618 8619 ret = true; 8620 8621 out: 8622 intel_display_power_put(dev_priv, power_domain); 8623 8624 return ret; 8625 } 8626 8627 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 8628 { 8629 struct drm_device *dev = &dev_priv->drm; 8630 struct intel_crtc *crtc; 8631 8632 for_each_intel_crtc(dev, crtc) 8633 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 8634 pipe_name(crtc->pipe)); 8635 8636 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 8637 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 8638 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 8639 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 8640 I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON, "Panel power on\n"); 8641 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 8642 "CPU PWM1 enabled\n"); 8643 if (IS_HASWELL(dev_priv)) 8644 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 8645 "CPU PWM2 enabled\n"); 8646 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE, 8647 "PCH PWM1 enabled\n"); 8648 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE, 8649 "Utility pin enabled\n"); 8650 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n"); 8651 8652 /* 8653 * In theory we can still leave IRQs enabled, as long as only the HPD 8654 * interrupts remain enabled. We used to check for that, but since it's 8655 * gen-specific and since we only disable LCPLL after we fully disable 8656 * the interrupts, the check below should be enough. 
8657 */ 8658 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n"); 8659 } 8660 8661 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 8662 { 8663 if (IS_HASWELL(dev_priv)) 8664 return I915_READ(D_COMP_HSW); 8665 else 8666 return I915_READ(D_COMP_BDW); 8667 } 8668 8669 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 8670 { 8671 if (IS_HASWELL(dev_priv)) { 8672 mutex_lock(&dev_priv->rps.hw_lock); 8673 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, 8674 val)) 8675 DRM_DEBUG_KMS("Failed to write to D_COMP\n"); 8676 mutex_unlock(&dev_priv->rps.hw_lock); 8677 } else { 8678 I915_WRITE(D_COMP_BDW, val); 8679 POSTING_READ(D_COMP_BDW); 8680 } 8681 } 8682 8683 /* 8684 * This function implements pieces of two sequences from BSpec: 8685 * - Sequence for display software to disable LCPLL 8686 * - Sequence for display software to allow package C8+ 8687 * The steps implemented here are just the steps that actually touch the LCPLL 8688 * register. Callers should take care of disabling all the display engine 8689 * functions, doing the mode unset, fixing interrupts, etc. 8690 */ 8691 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 8692 bool switch_to_fclk, bool allow_power_down) 8693 { 8694 uint32_t val; 8695 8696 assert_can_disable_lcpll(dev_priv); 8697 8698 val = I915_READ(LCPLL_CTL); 8699 8700 if (switch_to_fclk) { 8701 val |= LCPLL_CD_SOURCE_FCLK; 8702 I915_WRITE(LCPLL_CTL, val); 8703 8704 if (wait_for_us(I915_READ(LCPLL_CTL) & 8705 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 8706 DRM_ERROR("Switching to FCLK failed\n"); 8707 8708 val = I915_READ(LCPLL_CTL); 8709 } 8710 8711 val |= LCPLL_PLL_DISABLE; 8712 I915_WRITE(LCPLL_CTL, val); 8713 POSTING_READ(LCPLL_CTL); 8714 8715 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) 8716 DRM_ERROR("LCPLL still locked\n"); 8717 8718 val = hsw_read_dcomp(dev_priv); 8719 val |= D_COMP_COMP_DISABLE; 8720 hsw_write_dcomp(dev_priv, val); 8721 ndelay(100); 8722 8723 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 8724 1)) 8725 DRM_ERROR("D_COMP RCOMP still in progress\n"); 8726 8727 if (allow_power_down) { 8728 val = I915_READ(LCPLL_CTL); 8729 val |= LCPLL_POWER_DOWN_ALLOW; 8730 I915_WRITE(LCPLL_CTL, val); 8731 POSTING_READ(LCPLL_CTL); 8732 } 8733 } 8734 8735 /* 8736 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 8737 * source. 8738 */ 8739 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 8740 { 8741 uint32_t val; 8742 8743 val = I915_READ(LCPLL_CTL); 8744 8745 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 8746 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 8747 return; 8748 8749 /* 8750 * Make sure we're not on PC8 state before disabling PC8, otherwise 8751 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
8752 */ 8753 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 8754 8755 if (val & LCPLL_POWER_DOWN_ALLOW) { 8756 val &= ~LCPLL_POWER_DOWN_ALLOW; 8757 I915_WRITE(LCPLL_CTL, val); 8758 POSTING_READ(LCPLL_CTL); 8759 } 8760 8761 val = hsw_read_dcomp(dev_priv); 8762 val |= D_COMP_COMP_FORCE; 8763 val &= ~D_COMP_COMP_DISABLE; 8764 hsw_write_dcomp(dev_priv, val); 8765 8766 val = I915_READ(LCPLL_CTL); 8767 val &= ~LCPLL_PLL_DISABLE; 8768 I915_WRITE(LCPLL_CTL, val); 8769 8770 if (intel_wait_for_register(dev_priv, 8771 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 8772 5)) 8773 DRM_ERROR("LCPLL not locked yet\n"); 8774 8775 if (val & LCPLL_CD_SOURCE_FCLK) { 8776 val = I915_READ(LCPLL_CTL); 8777 val &= ~LCPLL_CD_SOURCE_FCLK; 8778 I915_WRITE(LCPLL_CTL, val); 8779 8780 if (wait_for_us((I915_READ(LCPLL_CTL) & 8781 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 8782 DRM_ERROR("Switching back to LCPLL failed\n"); 8783 } 8784 8785 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 8786 intel_update_cdclk(dev_priv); 8787 } 8788 8789 /* 8790 * Package states C8 and deeper are really deep PC states that can only be 8791 * reached when all the devices on the system allow it, so even if the graphics 8792 * device allows PC8+, it doesn't mean the system will actually get to these 8793 * states. Our driver only allows PC8+ when going into runtime PM. 8794 * 8795 * The requirements for PC8+ are that all the outputs are disabled, the power 8796 * well is disabled and most interrupts are disabled, and these are also 8797 * requirements for runtime PM. When these conditions are met, we manually do 8798 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 8799 * to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard 8800 * hang the machine. 8801 * 8802 * When we really reach PC8 or deeper states (not just when we allow it) we lose 8803 * the state of some registers, so when we come back from PC8+ we need to 8804 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't 8805 * need to take care of the registers kept by RC6. Notice that this happens even 8806 * if we don't put the device in PCI D3 state (which is what currently happens 8807 * because of the runtime PM support). 8808 * 8809 * For more, read "Display Sequences for Package C8" in the hardware 8810 * documentation.
8811 */ 8812 void hsw_enable_pc8(struct drm_i915_private *dev_priv) 8813 { 8814 uint32_t val; 8815 8816 DRM_DEBUG_KMS("Enabling package C8+\n"); 8817 8818 if (HAS_PCH_LPT_LP(dev_priv)) { 8819 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8820 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; 8821 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8822 } 8823 8824 lpt_disable_clkout_dp(dev_priv); 8825 hsw_disable_lcpll(dev_priv, true, true); 8826 } 8827 8828 void hsw_disable_pc8(struct drm_i915_private *dev_priv) 8829 { 8830 uint32_t val; 8831 8832 DRM_DEBUG_KMS("Disabling package C8+\n"); 8833 8834 hsw_restore_lcpll(dev_priv); 8835 lpt_init_pch_refclk(dev_priv); 8836 8837 if (HAS_PCH_LPT_LP(dev_priv)) { 8838 val = I915_READ(SOUTH_DSPCLK_GATE_D); 8839 val |= PCH_LP_PARTITION_LEVEL_DISABLE; 8840 I915_WRITE(SOUTH_DSPCLK_GATE_D, val); 8841 } 8842 } 8843 8844 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 8845 struct intel_crtc_state *crtc_state) 8846 { 8847 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 8848 struct intel_encoder *encoder = 8849 intel_ddi_get_crtc_new_encoder(crtc_state); 8850 8851 if (!intel_get_shared_dpll(crtc, crtc_state, encoder)) { 8852 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 8853 pipe_name(crtc->pipe)); 8854 return -EINVAL; 8855 } 8856 } 8857 8858 crtc->lowfreq_avail = false; 8859 8860 return 0; 8861 } 8862 8863 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 8864 enum port port, 8865 struct intel_crtc_state *pipe_config) 8866 { 8867 enum intel_dpll_id id; 8868 8869 switch (port) { 8870 case PORT_A: 8871 id = DPLL_ID_SKL_DPLL0; 8872 break; 8873 case PORT_B: 8874 id = DPLL_ID_SKL_DPLL1; 8875 break; 8876 case PORT_C: 8877 id = DPLL_ID_SKL_DPLL2; 8878 break; 8879 default: 8880 DRM_ERROR("Incorrect port type\n"); 8881 return; 8882 } 8883 8884 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8885 } 8886 8887 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 8888 enum port port, 8889 struct intel_crtc_state *pipe_config) 8890 { 8891 enum intel_dpll_id id; 8892 u32 temp; 8893 8894 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 8895 id = temp >> (port * 3 + 1); 8896 8897 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3)) 8898 return; 8899 8900 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8901 } 8902 8903 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 8904 enum port port, 8905 struct intel_crtc_state *pipe_config) 8906 { 8907 enum intel_dpll_id id; 8908 uint32_t ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 8909 8910 switch (ddi_pll_sel) { 8911 case PORT_CLK_SEL_WRPLL1: 8912 id = DPLL_ID_WRPLL1; 8913 break; 8914 case PORT_CLK_SEL_WRPLL2: 8915 id = DPLL_ID_WRPLL2; 8916 break; 8917 case PORT_CLK_SEL_SPLL: 8918 id = DPLL_ID_SPLL; 8919 break; 8920 case PORT_CLK_SEL_LCPLL_810: 8921 id = DPLL_ID_LCPLL_810; 8922 break; 8923 case PORT_CLK_SEL_LCPLL_1350: 8924 id = DPLL_ID_LCPLL_1350; 8925 break; 8926 case PORT_CLK_SEL_LCPLL_2700: 8927 id = DPLL_ID_LCPLL_2700; 8928 break; 8929 default: 8930 MISSING_CASE(ddi_pll_sel); 8931 /* fall through */ 8932 case PORT_CLK_SEL_NONE: 8933 return; 8934 } 8935 8936 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 8937 } 8938 8939 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 8940 struct intel_crtc_state *pipe_config, 8941 u64 *power_domain_mask) 8942 { 8943 struct drm_device *dev = crtc->base.dev; 8944 struct drm_i915_private *dev_priv = to_i915(dev); 8945 enum intel_display_power_domain 
power_domain; 8946 u32 tmp; 8947 8948 /* 8949 * The pipe->transcoder mapping is fixed with the exception of the eDP 8950 * transcoder handled below. 8951 */ 8952 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8953 8954 /* 8955 * XXX: Do intel_display_power_get_if_enabled before reading this (for 8956 * consistency and less surprising code; it's in always on power). 8957 */ 8958 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 8959 if (tmp & TRANS_DDI_FUNC_ENABLE) { 8960 enum i915_pipe trans_edp_pipe; 8961 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 8962 default: 8963 WARN(1, "unknown pipe linked to edp transcoder\n"); 8964 case TRANS_DDI_EDP_INPUT_A_ONOFF: 8965 case TRANS_DDI_EDP_INPUT_A_ON: 8966 trans_edp_pipe = PIPE_A; 8967 break; 8968 case TRANS_DDI_EDP_INPUT_B_ONOFF: 8969 trans_edp_pipe = PIPE_B; 8970 break; 8971 case TRANS_DDI_EDP_INPUT_C_ONOFF: 8972 trans_edp_pipe = PIPE_C; 8973 break; 8974 } 8975 8976 if (trans_edp_pipe == crtc->pipe) 8977 pipe_config->cpu_transcoder = TRANSCODER_EDP; 8978 } 8979 8980 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 8981 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8982 return false; 8983 *power_domain_mask |= BIT_ULL(power_domain); 8984 8985 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 8986 8987 return tmp & PIPECONF_ENABLE; 8988 } 8989 8990 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 8991 struct intel_crtc_state *pipe_config, 8992 u64 *power_domain_mask) 8993 { 8994 struct drm_device *dev = crtc->base.dev; 8995 struct drm_i915_private *dev_priv = to_i915(dev); 8996 enum intel_display_power_domain power_domain; 8997 enum port port; 8998 enum transcoder cpu_transcoder; 8999 u32 tmp; 9000 9001 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 9002 if (port == PORT_A) 9003 cpu_transcoder = TRANSCODER_DSI_A; 9004 else 9005 cpu_transcoder = TRANSCODER_DSI_C; 9006 9007 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 9008 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9009 continue; 9010 *power_domain_mask |= BIT_ULL(power_domain); 9011 9012 /* 9013 * The PLL needs to be enabled with a valid divider 9014 * configuration, otherwise accessing DSI registers will hang 9015 * the machine. See BSpec North Display Engine 9016 * registers/MIPI[BXT]. We can break out here early, since we 9017 * need the same DSI PLL to be enabled for both DSI ports. 
9018 */ 9019 if (!intel_dsi_pll_is_enabled(dev_priv)) 9020 break; 9021 9022 /* XXX: this works for video mode only */ 9023 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 9024 if (!(tmp & DPI_ENABLE)) 9025 continue; 9026 9027 tmp = I915_READ(MIPI_CTRL(port)); 9028 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 9029 continue; 9030 9031 pipe_config->cpu_transcoder = cpu_transcoder; 9032 break; 9033 } 9034 9035 return transcoder_is_dsi(pipe_config->cpu_transcoder); 9036 } 9037 9038 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 9039 struct intel_crtc_state *pipe_config) 9040 { 9041 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9042 struct intel_shared_dpll *pll; 9043 enum port port; 9044 uint32_t tmp; 9045 9046 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 9047 9048 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9049 9050 if (IS_GEN9_BC(dev_priv)) 9051 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9052 else if (IS_GEN9_LP(dev_priv)) 9053 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9054 else 9055 haswell_get_ddi_pll(dev_priv, port, pipe_config); 9056 9057 pll = pipe_config->shared_dpll; 9058 if (pll) { 9059 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 9060 &pipe_config->dpll_hw_state)); 9061 } 9062 9063 /* 9064 * Haswell has only FDI/PCH transcoder A. It is which is connected to 9065 * DDI E. So just check whether this pipe is wired to DDI E and whether 9066 * the PCH transcoder is on. 9067 */ 9068 if (INTEL_GEN(dev_priv) < 9 && 9069 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 9070 pipe_config->has_pch_encoder = true; 9071 9072 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 9073 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9074 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9075 9076 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9077 } 9078 } 9079 9080 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 9081 struct intel_crtc_state *pipe_config) 9082 { 9083 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9084 enum intel_display_power_domain power_domain; 9085 u64 power_domain_mask; 9086 bool active; 9087 9088 if (INTEL_GEN(dev_priv) >= 9) { 9089 intel_crtc_init_scalers(crtc, pipe_config); 9090 9091 pipe_config->scaler_state.scaler_id = -1; 9092 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9093 } 9094 9095 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 9096 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9097 return false; 9098 power_domain_mask = BIT_ULL(power_domain); 9099 9100 pipe_config->shared_dpll = NULL; 9101 9102 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 9103 9104 if (IS_GEN9_LP(dev_priv) && 9105 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { 9106 WARN_ON(active); 9107 active = true; 9108 } 9109 9110 if (!active) 9111 goto out; 9112 9113 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 9114 haswell_get_ddi_port_state(crtc, pipe_config); 9115 intel_get_pipe_timings(crtc, pipe_config); 9116 } 9117 9118 intel_get_pipe_src_size(crtc, pipe_config); 9119 9120 pipe_config->gamma_mode = 9121 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9122 9123 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9124 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9125 power_domain_mask |= BIT_ULL(power_domain); 9126 if (INTEL_GEN(dev_priv) >= 9) 9127 skylake_get_pfit_config(crtc, pipe_config); 9128 else 9129 ironlake_get_pfit_config(crtc, pipe_config); 9130 
} 9131 9132 if (IS_HASWELL(dev_priv)) 9133 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 9134 (I915_READ(IPS_CTL) & IPS_ENABLE); 9135 9136 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 9137 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 9138 pipe_config->pixel_multiplier = 9139 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 9140 } else { 9141 pipe_config->pixel_multiplier = 1; 9142 } 9143 9144 out: 9145 for_each_power_domain(power_domain, power_domain_mask) 9146 intel_display_power_put(dev_priv, power_domain); 9147 9148 return active; 9149 } 9150 9151 static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, 9152 const struct intel_plane_state *plane_state) 9153 { 9154 unsigned int width = plane_state->base.crtc_w; 9155 unsigned int stride = roundup_pow_of_two(width) * 4; 9156 9157 switch (stride) { 9158 default: 9159 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", 9160 width, stride); 9161 stride = 256; 9162 /* fallthrough */ 9163 case 256: 9164 case 512: 9165 case 1024: 9166 case 2048: 9167 break; 9168 } 9169 9170 return CURSOR_ENABLE | 9171 CURSOR_GAMMA_ENABLE | 9172 CURSOR_FORMAT_ARGB | 9173 CURSOR_STRIDE(stride); 9174 } 9175 9176 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 9177 const struct intel_plane_state *plane_state) 9178 { 9179 struct drm_device *dev = crtc->dev; 9180 struct drm_i915_private *dev_priv = to_i915(dev); 9181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9182 uint32_t cntl = 0, size = 0; 9183 9184 if (plane_state && plane_state->base.visible) { 9185 unsigned int width = plane_state->base.crtc_w; 9186 unsigned int height = plane_state->base.crtc_h; 9187 9188 cntl = plane_state->ctl; 9189 size = (height << 12) | width; 9190 } 9191 9192 if (intel_crtc->cursor_cntl != 0 && 9193 (intel_crtc->cursor_base != base || 9194 intel_crtc->cursor_size != size || 9195 intel_crtc->cursor_cntl != cntl)) { 9196 /* On these chipsets we can only modify the base/size/stride 9197 * whilst the cursor is disabled. 
9198 */ 9199 I915_WRITE_FW(CURCNTR(PIPE_A), 0); 9200 POSTING_READ_FW(CURCNTR(PIPE_A)); 9201 intel_crtc->cursor_cntl = 0; 9202 } 9203 9204 if (intel_crtc->cursor_base != base) { 9205 I915_WRITE_FW(CURBASE(PIPE_A), base); 9206 intel_crtc->cursor_base = base; 9207 } 9208 9209 if (intel_crtc->cursor_size != size) { 9210 I915_WRITE_FW(CURSIZE, size); 9211 intel_crtc->cursor_size = size; 9212 } 9213 9214 if (intel_crtc->cursor_cntl != cntl) { 9215 I915_WRITE_FW(CURCNTR(PIPE_A), cntl); 9216 POSTING_READ_FW(CURCNTR(PIPE_A)); 9217 intel_crtc->cursor_cntl = cntl; 9218 } 9219 } 9220 9221 static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, 9222 const struct intel_plane_state *plane_state) 9223 { 9224 struct drm_i915_private *dev_priv = 9225 to_i915(plane_state->base.plane->dev); 9226 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 9227 enum i915_pipe pipe = crtc->pipe; 9228 u32 cntl; 9229 9230 cntl = MCURSOR_GAMMA_ENABLE; 9231 9232 if (HAS_DDI(dev_priv)) 9233 cntl |= CURSOR_PIPE_CSC_ENABLE; 9234 9235 cntl |= pipe << 28; /* Connect to correct pipe */ 9236 9237 switch (plane_state->base.crtc_w) { 9238 case 64: 9239 cntl |= CURSOR_MODE_64_ARGB_AX; 9240 break; 9241 case 128: 9242 cntl |= CURSOR_MODE_128_ARGB_AX; 9243 break; 9244 case 256: 9245 cntl |= CURSOR_MODE_256_ARGB_AX; 9246 break; 9247 default: 9248 MISSING_CASE(plane_state->base.crtc_w); 9249 return 0; 9250 } 9251 9252 if (plane_state->base.rotation & DRM_ROTATE_180) 9253 cntl |= CURSOR_ROTATE_180; 9254 9255 return cntl; 9256 } 9257 9258 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, 9259 const struct intel_plane_state *plane_state) 9260 { 9261 struct drm_device *dev = crtc->dev; 9262 struct drm_i915_private *dev_priv = to_i915(dev); 9263 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9264 int pipe = intel_crtc->pipe; 9265 uint32_t cntl = 0; 9266 9267 if (plane_state && plane_state->base.visible) 9268 cntl = plane_state->ctl; 9269 9270 if (intel_crtc->cursor_cntl != cntl) { 9271 I915_WRITE_FW(CURCNTR(pipe), cntl); 9272 POSTING_READ_FW(CURCNTR(pipe)); 9273 intel_crtc->cursor_cntl = cntl; 9274 } 9275 9276 /* and commit changes on next vblank */ 9277 I915_WRITE_FW(CURBASE(pipe), base); 9278 POSTING_READ_FW(CURBASE(pipe)); 9279 9280 intel_crtc->cursor_base = base; 9281 } 9282 9283 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... 
*/ 9284 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 9285 const struct intel_plane_state *plane_state) 9286 { 9287 struct drm_device *dev = crtc->dev; 9288 struct drm_i915_private *dev_priv = to_i915(dev); 9289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9290 int pipe = intel_crtc->pipe; 9291 u32 base = intel_crtc->cursor_addr; 9292 unsigned long irqflags; 9293 u32 pos = 0; 9294 9295 if (plane_state) { 9296 int x = plane_state->base.crtc_x; 9297 int y = plane_state->base.crtc_y; 9298 9299 if (x < 0) { 9300 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 9301 x = -x; 9302 } 9303 pos |= x << CURSOR_X_SHIFT; 9304 9305 if (y < 0) { 9306 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 9307 y = -y; 9308 } 9309 pos |= y << CURSOR_Y_SHIFT; 9310 9311 /* ILK+ do this automagically */ 9312 if (HAS_GMCH_DISPLAY(dev_priv) && 9313 plane_state->base.rotation & DRM_ROTATE_180) { 9314 base += (plane_state->base.crtc_h * 9315 plane_state->base.crtc_w - 1) * 4; 9316 } 9317 } 9318 9319 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 9320 9321 I915_WRITE_FW(CURPOS(pipe), pos); 9322 9323 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 9324 i845_update_cursor(crtc, base, plane_state); 9325 else 9326 i9xx_update_cursor(crtc, base, plane_state); 9327 9328 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 9329 } 9330 9331 static bool cursor_size_ok(struct drm_i915_private *dev_priv, 9332 uint32_t width, uint32_t height) 9333 { 9334 if (width == 0 || height == 0) 9335 return false; 9336 9337 /* 9338 * 845g/865g are special in that they are only limited by 9339 * the width of their cursors, the height is arbitrary up to 9340 * the precision of the register. Everything else requires 9341 * square cursors, limited to a few power-of-two sizes. 9342 */ 9343 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 9344 if ((width & 63) != 0) 9345 return false; 9346 9347 if (width > (IS_I845G(dev_priv) ? 
64 : 512)) 9348 return false; 9349 9350 if (height > 1023) 9351 return false; 9352 } else { 9353 switch (width | height) { 9354 case 256: 9355 case 128: 9356 if (IS_GEN2(dev_priv)) 9357 return false; 9358 case 64: 9359 break; 9360 default: 9361 return false; 9362 } 9363 } 9364 9365 return true; 9366 } 9367 9368 /* VESA 640x480x72Hz mode to set on the pipe */ 9369 static struct drm_display_mode load_detect_mode = { 9370 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 9371 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 9372 }; 9373 9374 struct drm_framebuffer * 9375 intel_framebuffer_create(struct drm_i915_gem_object *obj, 9376 struct drm_mode_fb_cmd2 *mode_cmd) 9377 { 9378 struct intel_framebuffer *intel_fb; 9379 int ret; 9380 9381 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9382 if (!intel_fb) 9383 return ERR_PTR(-ENOMEM); 9384 9385 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd); 9386 if (ret) 9387 goto err; 9388 9389 return &intel_fb->base; 9390 9391 err: 9392 kfree(intel_fb); 9393 return ERR_PTR(ret); 9394 } 9395 9396 static u32 9397 intel_framebuffer_pitch_for_width(int width, int bpp) 9398 { 9399 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 9400 return ALIGN(pitch, 64); 9401 } 9402 9403 static u32 9404 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 9405 { 9406 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 9407 return PAGE_ALIGN(pitch * mode->vdisplay); 9408 } 9409 9410 static struct drm_framebuffer * 9411 intel_framebuffer_create_for_mode(struct drm_device *dev, 9412 struct drm_display_mode *mode, 9413 int depth, int bpp) 9414 { 9415 struct drm_framebuffer *fb; 9416 struct drm_i915_gem_object *obj; 9417 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 9418 9419 obj = i915_gem_object_create(to_i915(dev), 9420 intel_framebuffer_size_for_mode(mode, bpp)); 9421 if (IS_ERR(obj)) 9422 return ERR_CAST(obj); 9423 9424 mode_cmd.width = mode->hdisplay; 9425 mode_cmd.height = mode->vdisplay; 9426 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 9427 bpp); 9428 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 9429 9430 fb = intel_framebuffer_create(obj, &mode_cmd); 9431 if (IS_ERR(fb)) 9432 i915_gem_object_put(obj); 9433 9434 return fb; 9435 } 9436 9437 static struct drm_framebuffer * 9438 mode_fits_in_fbdev(struct drm_device *dev, 9439 struct drm_display_mode *mode) 9440 { 9441 #ifdef CONFIG_DRM_FBDEV_EMULATION 9442 struct drm_i915_private *dev_priv = to_i915(dev); 9443 struct drm_i915_gem_object *obj; 9444 struct drm_framebuffer *fb; 9445 9446 if (!dev_priv->fbdev) 9447 return NULL; 9448 9449 if (!dev_priv->fbdev->fb) 9450 return NULL; 9451 9452 obj = dev_priv->fbdev->fb->obj; 9453 BUG_ON(!obj); 9454 9455 fb = &dev_priv->fbdev->fb->base; 9456 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 9457 fb->format->cpp[0] * 8)) 9458 return NULL; 9459 9460 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 9461 return NULL; 9462 9463 drm_framebuffer_reference(fb); 9464 return fb; 9465 #else 9466 return NULL; 9467 #endif 9468 } 9469 9470 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, 9471 struct drm_crtc *crtc, 9472 struct drm_display_mode *mode, 9473 struct drm_framebuffer *fb, 9474 int x, int y) 9475 { 9476 struct drm_plane_state *plane_state; 9477 int hdisplay, vdisplay; 9478 int ret; 9479 9480 plane_state = drm_atomic_get_plane_state(state, crtc->primary); 9481 if (IS_ERR(plane_state)) 9482 return 
PTR_ERR(plane_state); 9483 9484 if (mode) 9485 drm_mode_get_hv_timing(mode, &hdisplay, &vdisplay); 9486 else 9487 hdisplay = vdisplay = 0; 9488 9489 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL); 9490 if (ret) 9491 return ret; 9492 drm_atomic_set_fb_for_plane(plane_state, fb); 9493 plane_state->crtc_x = 0; 9494 plane_state->crtc_y = 0; 9495 plane_state->crtc_w = hdisplay; 9496 plane_state->crtc_h = vdisplay; 9497 plane_state->src_x = x << 16; 9498 plane_state->src_y = y << 16; 9499 plane_state->src_w = hdisplay << 16; 9500 plane_state->src_h = vdisplay << 16; 9501 9502 return 0; 9503 } 9504 9505 int intel_get_load_detect_pipe(struct drm_connector *connector, 9506 struct drm_display_mode *mode, 9507 struct intel_load_detect_pipe *old, 9508 struct drm_modeset_acquire_ctx *ctx) 9509 { 9510 struct intel_crtc *intel_crtc; 9511 struct intel_encoder *intel_encoder = 9512 intel_attached_encoder(connector); 9513 struct drm_crtc *possible_crtc; 9514 struct drm_encoder *encoder = &intel_encoder->base; 9515 struct drm_crtc *crtc = NULL; 9516 struct drm_device *dev = encoder->dev; 9517 struct drm_i915_private *dev_priv = to_i915(dev); 9518 struct drm_framebuffer *fb; 9519 struct drm_mode_config *config = &dev->mode_config; 9520 struct drm_atomic_state *state = NULL, *restore_state = NULL; 9521 struct drm_connector_state *connector_state; 9522 struct intel_crtc_state *crtc_state; 9523 int ret, i = -1; 9524 9525 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 9526 connector->base.id, connector->name, 9527 encoder->base.id, encoder->name); 9528 9529 old->restore_state = NULL; 9530 9531 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); 9532 9533 /* 9534 * Algorithm gets a little messy: 9535 * 9536 * - if the connector already has an assigned crtc, use it (but make 9537 * sure it's on first) 9538 * 9539 * - try to find the first unused crtc that can drive this connector, 9540 * and use that if we find one 9541 */ 9542 9543 /* See if we already have a CRTC for this connector */ 9544 if (connector->state->crtc) { 9545 crtc = connector->state->crtc; 9546 9547 ret = drm_modeset_lock(&crtc->mutex, ctx); 9548 if (ret) 9549 goto fail; 9550 9551 /* Make sure the crtc and connector are running */ 9552 goto found; 9553 } 9554 9555 /* Find an unused one (if possible) */ 9556 for_each_crtc(dev, possible_crtc) { 9557 i++; 9558 if (!(encoder->possible_crtcs & (1 << i))) 9559 continue; 9560 9561 ret = drm_modeset_lock(&possible_crtc->mutex, ctx); 9562 if (ret) 9563 goto fail; 9564 9565 if (possible_crtc->state->enable) { 9566 drm_modeset_unlock(&possible_crtc->mutex); 9567 continue; 9568 } 9569 9570 crtc = possible_crtc; 9571 break; 9572 } 9573 9574 /* 9575 * If we didn't find an unused CRTC, don't use any. 
9576 */ 9577 if (!crtc) { 9578 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 9579 goto fail; 9580 } 9581 9582 found: 9583 intel_crtc = to_intel_crtc(crtc); 9584 9585 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 9586 if (ret) 9587 goto fail; 9588 9589 state = drm_atomic_state_alloc(dev); 9590 restore_state = drm_atomic_state_alloc(dev); 9591 if (!state || !restore_state) { 9592 ret = -ENOMEM; 9593 goto fail; 9594 } 9595 9596 state->acquire_ctx = ctx; 9597 restore_state->acquire_ctx = ctx; 9598 9599 connector_state = drm_atomic_get_connector_state(state, connector); 9600 if (IS_ERR(connector_state)) { 9601 ret = PTR_ERR(connector_state); 9602 goto fail; 9603 } 9604 9605 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc); 9606 if (ret) 9607 goto fail; 9608 9609 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 9610 if (IS_ERR(crtc_state)) { 9611 ret = PTR_ERR(crtc_state); 9612 goto fail; 9613 } 9614 9615 crtc_state->base.active = crtc_state->base.enable = true; 9616 9617 if (!mode) 9618 mode = &load_detect_mode; 9619 9620 /* We need a framebuffer large enough to accommodate all accesses 9621 * that the plane may generate whilst we perform load detection. 9622 * We can not rely on the fbcon either being present (we get called 9623 * during its initialisation to detect all boot displays, or it may 9624 * not even exist) or that it is large enough to satisfy the 9625 * requested mode. 9626 */ 9627 fb = mode_fits_in_fbdev(dev, mode); 9628 if (fb == NULL) { 9629 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 9630 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 9631 } else 9632 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 9633 if (IS_ERR(fb)) { 9634 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 9635 goto fail; 9636 } 9637 9638 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 9639 if (ret) 9640 goto fail; 9641 9642 drm_framebuffer_unreference(fb); 9643 9644 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); 9645 if (ret) 9646 goto fail; 9647 9648 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector)); 9649 if (!ret) 9650 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc)); 9651 if (!ret) 9652 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary)); 9653 if (ret) { 9654 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret); 9655 goto fail; 9656 } 9657 9658 ret = drm_atomic_commit(state); 9659 if (ret) { 9660 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 9661 goto fail; 9662 } 9663 9664 old->restore_state = restore_state; 9665 drm_atomic_state_put(state); 9666 9667 /* let the connector get through one full cycle before testing */ 9668 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 9669 return true; 9670 9671 fail: 9672 if (state) { 9673 drm_atomic_state_put(state); 9674 state = NULL; 9675 } 9676 if (restore_state) { 9677 drm_atomic_state_put(restore_state); 9678 restore_state = NULL; 9679 } 9680 9681 if (ret == -EDEADLK) 9682 return ret; 9683 9684 return false; 9685 } 9686 9687 void intel_release_load_detect_pipe(struct drm_connector *connector, 9688 struct intel_load_detect_pipe *old, 9689 struct drm_modeset_acquire_ctx *ctx) 9690 { 9691 struct intel_encoder *intel_encoder = 9692 intel_attached_encoder(connector); 9693 struct drm_encoder *encoder = &intel_encoder->base; 9694 struct drm_atomic_state *state = old->restore_state; 9695 int ret; 9696 9697 
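	/*
	 * old->restore_state holds the duplicated atomic state saved by
	 * intel_get_load_detect_pipe(); committing it below undoes the
	 * temporary load-detect modeset. If nothing was saved, the
	 * load-detect path never touched the hardware and there is
	 * nothing to restore.
	 */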
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 9698 connector->base.id, connector->name, 9699 encoder->base.id, encoder->name); 9700 9701 if (!state) 9702 return; 9703 9704 ret = drm_atomic_helper_commit_duplicated_state(state, ctx); 9705 if (ret) 9706 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret); 9707 drm_atomic_state_put(state); 9708 } 9709 9710 static int i9xx_pll_refclk(struct drm_device *dev, 9711 const struct intel_crtc_state *pipe_config) 9712 { 9713 struct drm_i915_private *dev_priv = to_i915(dev); 9714 u32 dpll = pipe_config->dpll_hw_state.dpll; 9715 9716 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 9717 return dev_priv->vbt.lvds_ssc_freq; 9718 else if (HAS_PCH_SPLIT(dev_priv)) 9719 return 120000; 9720 else if (!IS_GEN2(dev_priv)) 9721 return 96000; 9722 else 9723 return 48000; 9724 } 9725 9726 /* Returns the clock of the currently programmed mode of the given pipe. */ 9727 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 9728 struct intel_crtc_state *pipe_config) 9729 { 9730 struct drm_device *dev = crtc->base.dev; 9731 struct drm_i915_private *dev_priv = to_i915(dev); 9732 int pipe = pipe_config->cpu_transcoder; 9733 u32 dpll = pipe_config->dpll_hw_state.dpll; 9734 u32 fp; 9735 struct dpll clock; 9736 int port_clock; 9737 int refclk = i9xx_pll_refclk(dev, pipe_config); 9738 9739 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 9740 fp = pipe_config->dpll_hw_state.fp0; 9741 else 9742 fp = pipe_config->dpll_hw_state.fp1; 9743 9744 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 9745 if (IS_PINEVIEW(dev_priv)) { 9746 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 9747 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 9748 } else { 9749 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 9750 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 9751 } 9752 9753 if (!IS_GEN2(dev_priv)) { 9754 if (IS_PINEVIEW(dev_priv)) 9755 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 9756 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 9757 else 9758 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 9759 DPLL_FPA01_P1_POST_DIV_SHIFT); 9760 9761 switch (dpll & DPLL_MODE_MASK) { 9762 case DPLLB_MODE_DAC_SERIAL: 9763 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 9764 5 : 10; 9765 break; 9766 case DPLLB_MODE_LVDS: 9767 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 9768 7 : 14; 9769 break; 9770 default: 9771 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 9772 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 9773 return; 9774 } 9775 9776 if (IS_PINEVIEW(dev_priv)) 9777 port_clock = pnv_calc_dpll_params(refclk, &clock); 9778 else 9779 port_clock = i9xx_calc_dpll_params(refclk, &clock); 9780 } else { 9781 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS); 9782 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 9783 9784 if (is_lvds) { 9785 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 9786 DPLL_FPA01_P1_POST_DIV_SHIFT); 9787 9788 if (lvds & LVDS_CLKB_POWER_UP) 9789 clock.p2 = 7; 9790 else 9791 clock.p2 = 14; 9792 } else { 9793 if (dpll & PLL_P1_DIVIDE_BY_TWO) 9794 clock.p1 = 2; 9795 else { 9796 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 9797 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 9798 } 9799 if (dpll & PLL_P2_DIVIDE_BY_4) 9800 clock.p2 = 4; 9801 else 9802 clock.p2 = 2; 9803 } 9804 9805 port_clock = i9xx_calc_dpll_params(refclk, &clock); 9806 } 9807 9808 /* 9809 * This value includes pixel_multiplier. 
We will use 9810 * port_clock to compute adjusted_mode.crtc_clock in the 9811 * encoder's get_config() function. 9812 */ 9813 pipe_config->port_clock = port_clock; 9814 } 9815 9816 int intel_dotclock_calculate(int link_freq, 9817 const struct intel_link_m_n *m_n) 9818 { 9819 /* 9820 * The calculation for the data clock is: 9821 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 9822 * But we want to avoid losing precision if possible, so: 9823 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 9824 * 9825 * and using the link M/N values it is simpler: 9826 * pixel_clock = (link_m * link_clock) / link_n 9827 */ 9828 9829 if (!m_n->link_n) 9830 return 0; 9831 9832 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); 9833 } 9834 9835 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 9836 struct intel_crtc_state *pipe_config) 9837 { 9838 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 9839 9840 /* read out port_clock from the DPLL */ 9841 i9xx_crtc_clock_get(crtc, pipe_config); 9842 9843 /* 9844 * In case there is an active pipe without active ports, 9845 * we may need some idea for the dotclock anyway. 9846 * Calculate one based on the FDI configuration. 9847 */ 9848 pipe_config->base.adjusted_mode.crtc_clock = 9849 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 9850 &pipe_config->fdi_m_n); 9851 } 9852 9853 /** Returns the currently programmed mode of the given pipe. */ 9854 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 9855 struct drm_crtc *crtc) 9856 { 9857 struct drm_i915_private *dev_priv = to_i915(dev); 9858 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9859 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 9860 struct drm_display_mode *mode; 9861 struct intel_crtc_state *pipe_config; 9862 int htot = I915_READ(HTOTAL(cpu_transcoder)); 9863 int hsync = I915_READ(HSYNC(cpu_transcoder)); 9864 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 9865 int vsync = I915_READ(VSYNC(cpu_transcoder)); 9866 enum i915_pipe pipe = intel_crtc->pipe; 9867 9868 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 9869 if (!mode) 9870 return NULL; 9871 9872 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 9873 if (!pipe_config) { 9874 kfree(mode); 9875 return NULL; 9876 } 9877 9878 /* 9879 * Construct a pipe_config sufficient for getting the clock info 9880 * back out of crtc_clock_get. 9881 * 9882 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 9883 * to use a real value here instead.
9884 */ 9885 pipe_config->cpu_transcoder = (enum transcoder) pipe; 9886 pipe_config->pixel_multiplier = 1; 9887 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe)); 9888 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe)); 9889 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe)); 9890 i9xx_crtc_clock_get(intel_crtc, pipe_config); 9891 9892 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; 9893 mode->hdisplay = (htot & 0xffff) + 1; 9894 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 9895 mode->hsync_start = (hsync & 0xffff) + 1; 9896 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 9897 mode->vdisplay = (vtot & 0xffff) + 1; 9898 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 9899 mode->vsync_start = (vsync & 0xffff) + 1; 9900 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 9901 9902 drm_mode_set_name(mode); 9903 9904 kfree(pipe_config); 9905 9906 return mode; 9907 } 9908 9909 static void intel_crtc_destroy(struct drm_crtc *crtc) 9910 { 9911 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9912 struct drm_device *dev = crtc->dev; 9913 struct intel_flip_work *work; 9914 9915 spin_lock_irq(&dev->event_lock); 9916 work = intel_crtc->flip_work; 9917 intel_crtc->flip_work = NULL; 9918 spin_unlock_irq(&dev->event_lock); 9919 9920 if (work) { 9921 cancel_work_sync(&work->mmio_work); 9922 cancel_work_sync(&work->unpin_work); 9923 kfree(work); 9924 } 9925 9926 drm_crtc_cleanup(crtc); 9927 9928 kfree(intel_crtc); 9929 } 9930 9931 static void intel_unpin_work_fn(struct work_struct *__work) 9932 { 9933 struct intel_flip_work *work = 9934 container_of(__work, struct intel_flip_work, unpin_work); 9935 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 9936 struct drm_device *dev = crtc->base.dev; 9937 struct drm_plane *primary = crtc->base.primary; 9938 9939 if (is_mmio_work(work)) 9940 flush_work(&work->mmio_work); 9941 9942 mutex_lock(&dev->struct_mutex); 9943 intel_unpin_fb_vma(work->old_vma); 9944 i915_gem_object_put(work->pending_flip_obj); 9945 mutex_unlock(&dev->struct_mutex); 9946 9947 i915_gem_request_put(work->flip_queued_req); 9948 9949 intel_frontbuffer_flip_complete(to_i915(dev), 9950 to_intel_plane(primary)->frontbuffer_bit); 9951 intel_fbc_post_update(crtc); 9952 drm_framebuffer_unreference(work->old_fb); 9953 9954 BUG_ON(atomic_read(&crtc->unpin_work_count) == 0); 9955 atomic_dec(&crtc->unpin_work_count); 9956 9957 kfree(work); 9958 } 9959 9960 /* Is 'a' after or equal to 'b'? */ 9961 static bool g4x_flip_count_after_eq(u32 a, u32 b) 9962 { 9963 return !((a - b) & 0x80000000); 9964 } 9965 9966 static bool __pageflip_finished_cs(struct intel_crtc *crtc, 9967 struct intel_flip_work *work) 9968 { 9969 struct drm_device *dev = crtc->base.dev; 9970 struct drm_i915_private *dev_priv = to_i915(dev); 9971 9972 if (abort_flip_on_reset(crtc)) 9973 return true; 9974 9975 /* 9976 * The relevant registers don't exist on pre-ctg. 9977 * As the flip done interrupt doesn't trigger for mmio 9978 * flips on gmch platforms, a flip count check isn't 9979 * really needed there. But since ctg has the registers, 9980 * include it in the check anyway. 9981 */ 9982 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 9983 return true; 9984 9985 /* 9986 * BDW signals flip done immediately if the plane 9987 * is disabled, even if the plane enable is already 9988 * armed to occur at the next vblank :( 9989 */ 9990 9991 /* 9992 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 9993 * used the same base address.
In that case the mmio flip might 9994 * have completed, but the CS hasn't even executed the flip yet. 9995 * 9996 * A flip count check isn't enough as the CS might have updated 9997 * the base address just after start of vblank, but before we 9998 * managed to process the interrupt. This means we'd complete the 9999 * CS flip too soon. 10000 * 10001 * Combining both checks should get us a good enough result. It may 10002 * still happen that the CS flip has been executed, but has not 10003 * yet actually completed. But in case the base address is the same 10004 * anyway, we don't really care. 10005 */ 10006 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 10007 crtc->flip_work->gtt_offset && 10008 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 10009 crtc->flip_work->flip_count); 10010 } 10011 10012 static bool 10013 __pageflip_finished_mmio(struct intel_crtc *crtc, 10014 struct intel_flip_work *work) 10015 { 10016 /* 10017 * MMIO work completes when vblank is different from 10018 * flip_queued_vblank. 10019 * 10020 * Reset counter value doesn't matter, this is handled by 10021 * i915_wait_request finishing early, so no need to handle 10022 * reset here. 10023 */ 10024 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank; 10025 } 10026 10027 10028 static bool pageflip_finished(struct intel_crtc *crtc, 10029 struct intel_flip_work *work) 10030 { 10031 if (!atomic_read(&work->pending)) 10032 return false; 10033 10034 smp_rmb(); 10035 10036 if (is_mmio_work(work)) 10037 return __pageflip_finished_mmio(crtc, work); 10038 else 10039 return __pageflip_finished_cs(crtc, work); 10040 } 10041 10042 void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe) 10043 { 10044 struct drm_device *dev = &dev_priv->drm; 10045 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 10046 struct intel_flip_work *work; 10047 unsigned long flags; 10048 10049 /* Ignore early vblank irqs */ 10050 if (!crtc) 10051 return; 10052 10053 /* 10054 * This is called both by irq handlers and the reset code (to complete 10055 * lost pageflips) so needs the full irqsave spinlocks. 10056 */ 10057 spin_lock_irqsave(&dev->event_lock, flags); 10058 work = crtc->flip_work; 10059 10060 if (work != NULL && 10061 !is_mmio_work(work) && 10062 pageflip_finished(crtc, work)) 10063 page_flip_completed(crtc); 10064 10065 spin_unlock_irqrestore(&dev->event_lock, flags); 10066 } 10067 10068 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe) 10069 { 10070 struct drm_device *dev = &dev_priv->drm; 10071 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 10072 struct intel_flip_work *work; 10073 unsigned long flags; 10074 10075 /* Ignore early vblank irqs */ 10076 if (!crtc) 10077 return; 10078 10079 /* 10080 * This is called both by irq handlers and the reset code (to complete 10081 * lost pageflips) so needs the full irqsave spinlocks. 10082 */ 10083 spin_lock_irqsave(&dev->event_lock, flags); 10084 work = crtc->flip_work; 10085 10086 if (work != NULL && 10087 is_mmio_work(work) && 10088 pageflip_finished(crtc, work)) 10089 page_flip_completed(crtc); 10090 10091 spin_unlock_irqrestore(&dev->event_lock, flags); 10092 } 10093 10094 static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, 10095 struct intel_flip_work *work) 10096 { 10097 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); 10098 10099 /* Ensure that the work item is consistent when activating it ... 
*/ 10100 smp_mb__before_atomic(); 10101 atomic_set(&work->pending, 1); 10102 } 10103 10104 static int intel_gen2_queue_flip(struct drm_device *dev, 10105 struct drm_crtc *crtc, 10106 struct drm_framebuffer *fb, 10107 struct drm_i915_gem_object *obj, 10108 struct drm_i915_gem_request *req, 10109 uint32_t flags) 10110 { 10111 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10112 u32 flip_mask, *cs; 10113 10114 cs = intel_ring_begin(req, 6); 10115 if (IS_ERR(cs)) 10116 return PTR_ERR(cs); 10117 10118 /* Can't queue multiple flips, so wait for the previous 10119 * one to finish before executing the next. 10120 */ 10121 if (intel_crtc->plane) 10122 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 10123 else 10124 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 10125 *cs++ = MI_WAIT_FOR_EVENT | flip_mask; 10126 *cs++ = MI_NOOP; 10127 *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane); 10128 *cs++ = fb->pitches[0]; 10129 *cs++ = intel_crtc->flip_work->gtt_offset; 10130 *cs++ = 0; /* aux display base address, unused */ 10131 10132 return 0; 10133 } 10134 10135 static int intel_gen3_queue_flip(struct drm_device *dev, 10136 struct drm_crtc *crtc, 10137 struct drm_framebuffer *fb, 10138 struct drm_i915_gem_object *obj, 10139 struct drm_i915_gem_request *req, 10140 uint32_t flags) 10141 { 10142 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10143 u32 flip_mask, *cs; 10144 10145 cs = intel_ring_begin(req, 6); 10146 if (IS_ERR(cs)) 10147 return PTR_ERR(cs); 10148 10149 if (intel_crtc->plane) 10150 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 10151 else 10152 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 10153 *cs++ = MI_WAIT_FOR_EVENT | flip_mask; 10154 *cs++ = MI_NOOP; 10155 *cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane); 10156 *cs++ = fb->pitches[0]; 10157 *cs++ = intel_crtc->flip_work->gtt_offset; 10158 *cs++ = MI_NOOP; 10159 10160 return 0; 10161 } 10162 10163 static int intel_gen4_queue_flip(struct drm_device *dev, 10164 struct drm_crtc *crtc, 10165 struct drm_framebuffer *fb, 10166 struct drm_i915_gem_object *obj, 10167 struct drm_i915_gem_request *req, 10168 uint32_t flags) 10169 { 10170 struct drm_i915_private *dev_priv = to_i915(dev); 10171 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10172 u32 pf, pipesrc, *cs; 10173 10174 cs = intel_ring_begin(req, 4); 10175 if (IS_ERR(cs)) 10176 return PTR_ERR(cs); 10177 10178 /* i965+ uses the linear or tiled offsets from the 10179 * Display Registers (which do not change across a page-flip) 10180 * so we need only reprogram the base address. 10181 */ 10182 *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane); 10183 *cs++ = fb->pitches[0]; 10184 *cs++ = intel_crtc->flip_work->gtt_offset | 10185 intel_fb_modifier_to_tiling(fb->modifier); 10186 10187 /* XXX Enabling the panel-fitter across page-flip is so far 10188 * untested on non-native modes, so ignore it for now. 10189 * pf = I915_READ(pipe == 0 ? 
PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 10190 */ 10191 pf = 0; 10192 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10193 *cs++ = pf | pipesrc; 10194 10195 return 0; 10196 } 10197 10198 static int intel_gen6_queue_flip(struct drm_device *dev, 10199 struct drm_crtc *crtc, 10200 struct drm_framebuffer *fb, 10201 struct drm_i915_gem_object *obj, 10202 struct drm_i915_gem_request *req, 10203 uint32_t flags) 10204 { 10205 struct drm_i915_private *dev_priv = to_i915(dev); 10206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10207 u32 pf, pipesrc, *cs; 10208 10209 cs = intel_ring_begin(req, 4); 10210 if (IS_ERR(cs)) 10211 return PTR_ERR(cs); 10212 10213 *cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane); 10214 *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier); 10215 *cs++ = intel_crtc->flip_work->gtt_offset; 10216 10217 /* Contrary to the suggestions in the documentation, 10218 * "Enable Panel Fitter" does not seem to be required when page 10219 * flipping with a non-native mode, and worse causes a normal 10220 * modeset to fail. 10221 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 10222 */ 10223 pf = 0; 10224 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 10225 *cs++ = pf | pipesrc; 10226 10227 return 0; 10228 } 10229 10230 static int intel_gen7_queue_flip(struct drm_device *dev, 10231 struct drm_crtc *crtc, 10232 struct drm_framebuffer *fb, 10233 struct drm_i915_gem_object *obj, 10234 struct drm_i915_gem_request *req, 10235 uint32_t flags) 10236 { 10237 struct drm_i915_private *dev_priv = to_i915(dev); 10238 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10239 u32 *cs, plane_bit = 0; 10240 int len, ret; 10241 10242 switch (intel_crtc->plane) { 10243 case PLANE_A: 10244 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 10245 break; 10246 case PLANE_B: 10247 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 10248 break; 10249 case PLANE_C: 10250 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 10251 break; 10252 default: 10253 WARN_ONCE(1, "unknown plane in flip command\n"); 10254 return -ENODEV; 10255 } 10256 10257 len = 4; 10258 if (req->engine->id == RCS) { 10259 len += 6; 10260 /* 10261 * On Gen 8, SRM is now taking an extra dword to accommodate 10262 * 48bits addresses, and we need a NOOP for the batch size to 10263 * stay even. 10264 */ 10265 if (IS_GEN8(dev_priv)) 10266 len += 2; 10267 } 10268 10269 /* 10270 * BSpec MI_DISPLAY_FLIP for IVB: 10271 * "The full packet must be contained within the same cache line." 10272 * 10273 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 10274 * cacheline, if we ever start emitting more commands before 10275 * the MI_DISPLAY_FLIP we may need to first emit everything else, 10276 * then do the cacheline alignment, and finally emit the 10277 * MI_DISPLAY_FLIP. 10278 */ 10279 ret = intel_ring_cacheline_align(req); 10280 if (ret) 10281 return ret; 10282 10283 cs = intel_ring_begin(req, len); 10284 if (IS_ERR(cs)) 10285 return PTR_ERR(cs); 10286 10287 /* Unmask the flip-done completion message. Note that the bspec says that 10288 * we should do this for both the BCS and RCS, and that we must not unmask 10289 * more than one flip event at any time (or ensure that one flip message 10290 * can be sent by waiting for flip-done prior to queueing new flips). 10291 * Experimentation says that BCS works despite DERRMR masking all 10292 * flip-done completion events and that unmasking all planes at once 10293 * for the RCS also doesn't appear to drop events. 
Setting the DERRMR 10294 * to zero does lead to lockups within MI_DISPLAY_FLIP. 10295 */ 10296 if (req->engine->id == RCS) { 10297 *cs++ = MI_LOAD_REGISTER_IMM(1); 10298 *cs++ = i915_mmio_reg_offset(DERRMR); 10299 *cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE | 10300 DERRMR_PIPEB_PRI_FLIP_DONE | 10301 DERRMR_PIPEC_PRI_FLIP_DONE); 10302 if (IS_GEN8(dev_priv)) 10303 *cs++ = MI_STORE_REGISTER_MEM_GEN8 | 10304 MI_SRM_LRM_GLOBAL_GTT; 10305 else 10306 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; 10307 *cs++ = i915_mmio_reg_offset(DERRMR); 10308 *cs++ = i915_ggtt_offset(req->engine->scratch) + 256; 10309 if (IS_GEN8(dev_priv)) { 10310 *cs++ = 0; 10311 *cs++ = MI_NOOP; 10312 } 10313 } 10314 10315 *cs++ = MI_DISPLAY_FLIP_I915 | plane_bit; 10316 *cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier); 10317 *cs++ = intel_crtc->flip_work->gtt_offset; 10318 *cs++ = MI_NOOP; 10319 10320 return 0; 10321 } 10322 10323 static bool use_mmio_flip(struct intel_engine_cs *engine, 10324 struct drm_i915_gem_object *obj) 10325 { 10326 /* 10327 * This is not being used for older platforms, because 10328 * non-availability of flip done interrupt forces us to use 10329 * CS flips. Older platforms derive flip done using some clever 10330 * tricks involving the flip_pending status bits and vblank irqs. 10331 * So using MMIO flips there would disrupt this mechanism. 10332 */ 10333 10334 if (engine == NULL) 10335 return true; 10336 10337 if (INTEL_GEN(engine->i915) < 5) 10338 return false; 10339 10340 if (i915.use_mmio_flip < 0) 10341 return false; 10342 else if (i915.use_mmio_flip > 0) 10343 return true; 10344 else if (i915.enable_execlists) 10345 return true; 10346 10347 return engine != i915_gem_object_last_write_engine(obj); 10348 } 10349 10350 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 10351 unsigned int rotation, 10352 struct intel_flip_work *work) 10353 { 10354 struct drm_device *dev = intel_crtc->base.dev; 10355 struct drm_i915_private *dev_priv = to_i915(dev); 10356 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 10357 const enum i915_pipe pipe = intel_crtc->pipe; 10358 u32 ctl, stride = skl_plane_stride(fb, 0, rotation); 10359 10360 ctl = I915_READ(PLANE_CTL(pipe, 0)); 10361 ctl &= ~PLANE_CTL_TILED_MASK; 10362 switch (fb->modifier) { 10363 case DRM_FORMAT_MOD_LINEAR: 10364 break; 10365 case I915_FORMAT_MOD_X_TILED: 10366 ctl |= PLANE_CTL_TILED_X; 10367 break; 10368 case I915_FORMAT_MOD_Y_TILED: 10369 ctl |= PLANE_CTL_TILED_Y; 10370 break; 10371 case I915_FORMAT_MOD_Yf_TILED: 10372 ctl |= PLANE_CTL_TILED_YF; 10373 break; 10374 default: 10375 MISSING_CASE(fb->modifier); 10376 } 10377 10378 /* 10379 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on 10380 * PLANE_SURF updates, the update is then guaranteed to be atomic. 
10381 */ 10382 I915_WRITE(PLANE_CTL(pipe, 0), ctl); 10383 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 10384 10385 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset); 10386 POSTING_READ(PLANE_SURF(pipe, 0)); 10387 } 10388 10389 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 10390 struct intel_flip_work *work) 10391 { 10392 struct drm_device *dev = intel_crtc->base.dev; 10393 struct drm_i915_private *dev_priv = to_i915(dev); 10394 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 10395 i915_reg_t reg = DSPCNTR(intel_crtc->plane); 10396 u32 dspcntr; 10397 10398 dspcntr = I915_READ(reg); 10399 10400 if (fb->modifier == I915_FORMAT_MOD_X_TILED) 10401 dspcntr |= DISPPLANE_TILED; 10402 else 10403 dspcntr &= ~DISPPLANE_TILED; 10404 10405 I915_WRITE(reg, dspcntr); 10406 10407 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset); 10408 POSTING_READ(DSPSURF(intel_crtc->plane)); 10409 } 10410 10411 static void intel_mmio_flip_work_func(struct work_struct *w) 10412 { 10413 struct intel_flip_work *work = 10414 container_of(w, struct intel_flip_work, mmio_work); 10415 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 10416 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 10417 struct intel_framebuffer *intel_fb = 10418 to_intel_framebuffer(crtc->base.primary->fb); 10419 struct drm_i915_gem_object *obj = intel_fb->obj; 10420 10421 #ifndef __DragonFly__ 10422 WARN_ON(i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT, NULL) < 0); 10423 #else 10424 /* 10425 XXX: on DragonFly, mmio flips sometimes get stuck with the 10426 system_unbound_wq thread sleeping in lstim state. 10427 Bound the timeout to 10 jiffies for lack of a better immediate fix. 10428 */ 10429 WARN_ON(i915_gem_object_wait(obj, 0, 10, NULL) < 0); 10430 #endif 10431 10432 intel_pipe_update_start(crtc); 10433 10434 if (INTEL_GEN(dev_priv) >= 9) 10435 skl_do_mmio_flip(crtc, work->rotation, work); 10436 else 10437 /* use_mmio_flip() restricts MMIO flips to ilk+ */ 10438 ilk_do_mmio_flip(crtc, work); 10439 10440 intel_pipe_update_end(crtc, work); 10441 } 10442 10443 static int intel_default_queue_flip(struct drm_device *dev, 10444 struct drm_crtc *crtc, 10445 struct drm_framebuffer *fb, 10446 struct drm_i915_gem_object *obj, 10447 struct drm_i915_gem_request *req, 10448 uint32_t flags) 10449 { 10450 return -ENODEV; 10451 } 10452 10453 static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, 10454 struct intel_crtc *intel_crtc, 10455 struct intel_flip_work *work) 10456 { 10457 u32 addr, vblank; 10458 10459 if (!atomic_read(&work->pending)) 10460 return false; 10461 10462 smp_rmb(); 10463 10464 vblank = intel_crtc_get_vblank_counter(intel_crtc); 10465 if (work->flip_ready_vblank == 0) { 10466 if (work->flip_queued_req && 10467 !i915_gem_request_completed(work->flip_queued_req)) 10468 return false; 10469 10470 work->flip_ready_vblank = vblank; 10471 } 10472 10473 if (vblank - work->flip_ready_vblank < 3) 10474 return false; 10475 10476 /* Potential stall - if we see that the flip has happened, 10477 * assume a missed interrupt. */ 10478 if (INTEL_GEN(dev_priv) >= 4) 10479 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 10480 else 10481 addr = I915_READ(DSPADDR(intel_crtc->plane)); 10482 10483 /* There is a potential issue here with a false positive after a flip 10484 * to the same address. We could address this by checking for a 10485 * non-incrementing frame counter.
10486 */ 10487 return addr == work->gtt_offset; 10488 } 10489 10490 void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) 10491 { 10492 struct drm_device *dev = &dev_priv->drm; 10493 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 10494 struct intel_flip_work *work; 10495 10496 // WARN_ON(!in_interrupt()); 10497 10498 if (crtc == NULL) 10499 return; 10500 10501 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10502 work = crtc->flip_work; 10503 10504 if (work != NULL && !is_mmio_work(work) && 10505 __pageflip_stall_check_cs(dev_priv, crtc, work)) { 10506 WARN_ONCE(1, 10507 "Kicking stuck page flip: queued at %d, now %d\n", 10508 work->flip_queued_vblank, intel_crtc_get_vblank_counter(crtc)); 10509 page_flip_completed(crtc); 10510 work = NULL; 10511 } 10512 10513 if (work != NULL && !is_mmio_work(work) && 10514 intel_crtc_get_vblank_counter(crtc) - work->flip_queued_vblank > 1) 10515 intel_queue_rps_boost_for_request(work->flip_queued_req); 10516 lockmgr(&dev->event_lock, LK_RELEASE); 10517 } 10518 10519 __maybe_unused 10520 static int intel_crtc_page_flip(struct drm_crtc *crtc, 10521 struct drm_framebuffer *fb, 10522 struct drm_pending_vblank_event *event, 10523 uint32_t page_flip_flags) 10524 { 10525 struct drm_device *dev = crtc->dev; 10526 struct drm_i915_private *dev_priv = to_i915(dev); 10527 struct drm_framebuffer *old_fb = crtc->primary->fb; 10528 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10529 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10530 struct drm_plane *primary = crtc->primary; 10531 enum i915_pipe pipe = intel_crtc->pipe; 10532 struct intel_flip_work *work; 10533 struct intel_engine_cs *engine; 10534 bool mmio_flip; 10535 struct drm_i915_gem_request *request; 10536 struct i915_vma *vma; 10537 int ret; 10538 10539 /* 10540 * drm_mode_page_flip_ioctl() should already catch this, but double 10541 * check to be safe. In the future we may enable pageflipping from 10542 * a disabled primary plane. 10543 */ 10544 if (WARN_ON(intel_fb_obj(old_fb) == NULL)) 10545 return -EBUSY; 10546 10547 /* Can't change pixel format via MI display flips. */ 10548 if (fb->format != crtc->primary->fb->format) 10549 return -EINVAL; 10550 10551 /* 10552 * TILEOFF/LINOFF registers can't be changed via MI display flips. 10553 * Note that pitch changes could also affect these registers. 10554 */ 10555 if (INTEL_GEN(dev_priv) > 3 && 10556 (fb->offsets[0] != crtc->primary->fb->offsets[0] || 10557 fb->pitches[0] != crtc->primary->fb->pitches[0])) 10558 return -EINVAL; 10559 10560 if (i915_terminally_wedged(&dev_priv->gpu_error)) 10561 goto out_hang; 10562 10563 work = kzalloc(sizeof(*work), GFP_KERNEL); 10564 if (work == NULL) 10565 return -ENOMEM; 10566 10567 work->event = event; 10568 work->crtc = crtc; 10569 work->old_fb = old_fb; 10570 INIT_WORK(&work->unpin_work, intel_unpin_work_fn); 10571 10572 ret = drm_crtc_vblank_get(crtc); 10573 if (ret) 10574 goto free_work; 10575 10576 /* We borrow the event spin lock for protecting flip_work */ 10577 spin_lock_irq(&dev->event_lock); 10578 if (intel_crtc->flip_work) { 10579 /* Before declaring the flip queue wedged, check if 10580 * the hardware completed the operation behind our backs.
10581 */ 10582 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) { 10583 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 10584 page_flip_completed(intel_crtc); 10585 } else { 10586 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 10587 spin_unlock_irq(&dev->event_lock); 10588 10589 drm_crtc_vblank_put(crtc); 10590 kfree(work); 10591 return -EBUSY; 10592 } 10593 } 10594 intel_crtc->flip_work = work; 10595 spin_unlock_irq(&dev->event_lock); 10596 10597 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 10598 flush_workqueue(dev_priv->wq); 10599 10600 /* Reference the objects for the scheduled work. */ 10601 drm_framebuffer_reference(work->old_fb); 10602 10603 crtc->primary->fb = fb; 10604 update_state_fb(crtc->primary); 10605 10606 work->pending_flip_obj = i915_gem_object_get(obj); 10607 10608 ret = i915_mutex_lock_interruptible(dev); 10609 if (ret) 10610 goto cleanup; 10611 10612 intel_crtc->reset_count = i915_reset_count(&dev_priv->gpu_error); 10613 if (i915_reset_backoff_or_wedged(&dev_priv->gpu_error)) { 10614 ret = -EIO; 10615 goto unlock; 10616 } 10617 10618 atomic_inc(&intel_crtc->unpin_work_count); 10619 10620 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 10621 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1; 10622 10623 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 10624 engine = dev_priv->engine[BCS]; 10625 if (fb->modifier != old_fb->modifier) 10626 /* vlv: DISPLAY_FLIP fails to change tiling */ 10627 engine = NULL; 10628 } else if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { 10629 engine = dev_priv->engine[BCS]; 10630 } else if (INTEL_GEN(dev_priv) >= 7) { 10631 engine = i915_gem_object_last_write_engine(obj); 10632 if (engine == NULL || engine->id != RCS) 10633 engine = dev_priv->engine[BCS]; 10634 } else { 10635 engine = dev_priv->engine[RCS]; 10636 } 10637 10638 mmio_flip = use_mmio_flip(engine, obj); 10639 10640 vma = intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 10641 if (IS_ERR(vma)) { 10642 ret = PTR_ERR(vma); 10643 goto cleanup_pending; 10644 } 10645 10646 work->old_vma = to_intel_plane_state(primary->state)->vma; 10647 to_intel_plane_state(primary->state)->vma = vma; 10648 10649 work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset; 10650 work->rotation = crtc->primary->state->rotation; 10651 10652 /* 10653 * There's the potential that the next frame will not be compatible with 10654 * FBC, so we want to call pre_update() before the actual page flip. 10655 * The problem is that pre_update() caches some information about the fb 10656 * object, so we want to do this only after the object is pinned. Let's 10657 * be on the safe side and do this immediately before scheduling the 10658 * flip. 
10659 */ 10660 intel_fbc_pre_update(intel_crtc, intel_crtc->config, 10661 to_intel_plane_state(primary->state)); 10662 10663 if (mmio_flip) { 10664 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func); 10665 queue_work(system_unbound_wq, &work->mmio_work); 10666 } else { 10667 request = i915_gem_request_alloc(engine, 10668 dev_priv->kernel_context); 10669 if (IS_ERR(request)) { 10670 ret = PTR_ERR(request); 10671 goto cleanup_unpin; 10672 } 10673 10674 ret = i915_gem_request_await_object(request, obj, false); 10675 if (ret) 10676 goto cleanup_request; 10677 10678 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 10679 page_flip_flags); 10680 if (ret) 10681 goto cleanup_request; 10682 10683 intel_mark_page_flip_active(intel_crtc, work); 10684 10685 work->flip_queued_req = i915_gem_request_get(request); 10686 i915_add_request(request); 10687 } 10688 10689 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); 10690 i915_gem_track_fb(intel_fb_obj(old_fb), obj, 10691 to_intel_plane(primary)->frontbuffer_bit); 10692 mutex_unlock(&dev->struct_mutex); 10693 10694 intel_frontbuffer_flip_prepare(to_i915(dev), 10695 to_intel_plane(primary)->frontbuffer_bit); 10696 10697 trace_i915_flip_request(intel_crtc->plane, obj); 10698 10699 return 0; 10700 10701 cleanup_request: 10702 i915_add_request(request); 10703 cleanup_unpin: 10704 to_intel_plane_state(primary->state)->vma = work->old_vma; 10705 intel_unpin_fb_vma(vma); 10706 cleanup_pending: 10707 atomic_dec(&intel_crtc->unpin_work_count); 10708 unlock: 10709 mutex_unlock(&dev->struct_mutex); 10710 cleanup: 10711 crtc->primary->fb = old_fb; 10712 update_state_fb(crtc->primary); 10713 10714 i915_gem_object_put(obj); 10715 drm_framebuffer_unreference(work->old_fb); 10716 10717 spin_lock_irq(&dev->event_lock); 10718 intel_crtc->flip_work = NULL; 10719 spin_unlock_irq(&dev->event_lock); 10720 10721 drm_crtc_vblank_put(crtc); 10722 free_work: 10723 kfree(work); 10724 10725 if (ret == -EIO) { 10726 struct drm_atomic_state *state; 10727 struct drm_plane_state *plane_state; 10728 10729 out_hang: 10730 state = drm_atomic_state_alloc(dev); 10731 if (!state) 10732 return -ENOMEM; 10733 state->acquire_ctx = dev->mode_config.acquire_ctx; 10734 10735 retry: 10736 plane_state = drm_atomic_get_plane_state(state, primary); 10737 ret = PTR_ERR_OR_ZERO(plane_state); 10738 if (!ret) { 10739 drm_atomic_set_fb_for_plane(plane_state, fb); 10740 10741 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc); 10742 if (!ret) 10743 ret = drm_atomic_commit(state); 10744 } 10745 10746 if (ret == -EDEADLK) { 10747 drm_modeset_backoff(state->acquire_ctx); 10748 drm_atomic_state_clear(state); 10749 goto retry; 10750 } 10751 10752 drm_atomic_state_put(state); 10753 10754 if (ret == 0 && event) { 10755 spin_lock_irq(&dev->event_lock); 10756 drm_crtc_send_vblank_event(crtc, event); 10757 spin_unlock_irq(&dev->event_lock); 10758 } 10759 } 10760 return ret; 10761 } 10762 10763 10764 /** 10765 * intel_wm_need_update - Check whether watermarks need updating 10766 * @plane: drm plane 10767 * @state: new plane state 10768 * 10769 * Check current plane state versus the new one to determine whether 10770 * watermarks need to be recalculated. 10771 * 10772 * Returns true or false. 
10773 */ 10774 static bool intel_wm_need_update(struct drm_plane *plane, 10775 struct drm_plane_state *state) 10776 { 10777 struct intel_plane_state *new = to_intel_plane_state(state); 10778 struct intel_plane_state *cur = to_intel_plane_state(plane->state); 10779 10780 /* Update watermarks on tiling or size changes. */ 10781 if (new->base.visible != cur->base.visible) 10782 return true; 10783 10784 if (!cur->base.fb || !new->base.fb) 10785 return false; 10786 10787 if (cur->base.fb->modifier != new->base.fb->modifier || 10788 cur->base.rotation != new->base.rotation || 10789 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) || 10790 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) || 10791 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) || 10792 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst)) 10793 return true; 10794 10795 return false; 10796 } 10797 10798 static bool needs_scaling(struct intel_plane_state *state) 10799 { 10800 int src_w = drm_rect_width(&state->base.src) >> 16; 10801 int src_h = drm_rect_height(&state->base.src) >> 16; 10802 int dst_w = drm_rect_width(&state->base.dst); 10803 int dst_h = drm_rect_height(&state->base.dst); 10804 10805 return (src_w != dst_w || src_h != dst_h); 10806 } 10807 10808 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, 10809 struct drm_plane_state *plane_state) 10810 { 10811 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state); 10812 struct drm_crtc *crtc = crtc_state->crtc; 10813 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10814 struct intel_plane *plane = to_intel_plane(plane_state->plane); 10815 struct drm_device *dev = crtc->dev; 10816 struct drm_i915_private *dev_priv = to_i915(dev); 10817 struct intel_plane_state *old_plane_state = 10818 to_intel_plane_state(plane->base.state); 10819 bool mode_changed = needs_modeset(crtc_state); 10820 bool was_crtc_enabled = crtc->state->active; 10821 bool is_crtc_enabled = crtc_state->active; 10822 bool turn_off, turn_on, visible, was_visible; 10823 struct drm_framebuffer *fb = plane_state->fb; 10824 int ret; 10825 10826 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { 10827 ret = skl_update_scaler_plane( 10828 to_intel_crtc_state(crtc_state), 10829 to_intel_plane_state(plane_state)); 10830 if (ret) 10831 return ret; 10832 } 10833 10834 was_visible = old_plane_state->base.visible; 10835 visible = plane_state->visible; 10836 10837 if (!was_crtc_enabled && WARN_ON(was_visible)) 10838 was_visible = false; 10839 10840 /* 10841 * Visibility is calculated as if the crtc was on, but 10842 * after scaler setup everything depends on it being off 10843 * when the crtc isn't active. 10844 * 10845 * FIXME this is wrong for watermarks. Watermarks should also 10846 * be computed as if the pipe would be active. Perhaps move 10847 * per-plane wm computation to the .check_plane() hook, and 10848 * only combine the results from all planes in the current place? 
10849 */ 10850 if (!is_crtc_enabled) { 10851 plane_state->visible = visible = false; 10852 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id); 10853 } 10854 10855 if (!was_visible && !visible) 10856 return 0; 10857 10858 if (fb != old_plane_state->base.fb) 10859 pipe_config->fb_changed = true; 10860 10861 turn_off = was_visible && (!visible || mode_changed); 10862 turn_on = visible && (!was_visible || mode_changed); 10863 10864 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", 10865 intel_crtc->base.base.id, intel_crtc->base.name, 10866 plane->base.base.id, plane->base.name, 10867 fb ? fb->base.id : -1); 10868 10869 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", 10870 plane->base.base.id, plane->base.name, 10871 was_visible, visible, 10872 turn_off, turn_on, mode_changed); 10873 10874 if (turn_on) { 10875 if (INTEL_GEN(dev_priv) < 5) 10876 pipe_config->update_wm_pre = true; 10877 10878 /* must disable cxsr around plane enable/disable */ 10879 if (plane->id != PLANE_CURSOR) 10880 pipe_config->disable_cxsr = true; 10881 } else if (turn_off) { 10882 if (INTEL_GEN(dev_priv) < 5) 10883 pipe_config->update_wm_post = true; 10884 10885 /* must disable cxsr around plane enable/disable */ 10886 if (plane->id != PLANE_CURSOR) 10887 pipe_config->disable_cxsr = true; 10888 } else if (intel_wm_need_update(&plane->base, plane_state)) { 10889 if (INTEL_GEN(dev_priv) < 5) { 10890 /* FIXME bollocks */ 10891 pipe_config->update_wm_pre = true; 10892 pipe_config->update_wm_post = true; 10893 } 10894 } 10895 10896 if (visible || was_visible) 10897 pipe_config->fb_bits |= plane->frontbuffer_bit; 10898 10899 /* 10900 * WaCxSRDisabledForSpriteScaling:ivb 10901 * 10902 * cstate->update_wm was already set above, so this flag will 10903 * take effect when we commit and program watermarks. 
10904 */ 10905 if (plane->id == PLANE_SPRITE0 && IS_IVYBRIDGE(dev_priv) && 10906 needs_scaling(to_intel_plane_state(plane_state)) && 10907 !needs_scaling(old_plane_state)) 10908 pipe_config->disable_lp_wm = true; 10909 10910 return 0; 10911 } 10912 10913 static bool encoders_cloneable(const struct intel_encoder *a, 10914 const struct intel_encoder *b) 10915 { 10916 /* masks could be asymmetric, so check both ways */ 10917 return a == b || (a->cloneable & (1 << b->type) && 10918 b->cloneable & (1 << a->type)); 10919 } 10920 10921 static bool check_single_encoder_cloning(struct drm_atomic_state *state, 10922 struct intel_crtc *crtc, 10923 struct intel_encoder *encoder) 10924 { 10925 struct intel_encoder *source_encoder; 10926 struct drm_connector *connector; 10927 struct drm_connector_state *connector_state; 10928 int i; 10929 10930 for_each_new_connector_in_state(state, connector, connector_state, i) { 10931 if (connector_state->crtc != &crtc->base) 10932 continue; 10933 10934 source_encoder = 10935 to_intel_encoder(connector_state->best_encoder); 10936 if (!encoders_cloneable(encoder, source_encoder)) 10937 return false; 10938 } 10939 10940 return true; 10941 } 10942 10943 static int intel_crtc_atomic_check(struct drm_crtc *crtc, 10944 struct drm_crtc_state *crtc_state) 10945 { 10946 struct drm_device *dev = crtc->dev; 10947 struct drm_i915_private *dev_priv = to_i915(dev); 10948 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10949 struct intel_crtc_state *pipe_config = 10950 to_intel_crtc_state(crtc_state); 10951 struct drm_atomic_state *state = crtc_state->state; 10952 int ret; 10953 bool mode_changed = needs_modeset(crtc_state); 10954 10955 if (mode_changed && !crtc_state->active) 10956 pipe_config->update_wm_post = true; 10957 10958 if (mode_changed && crtc_state->enable && 10959 dev_priv->display.crtc_compute_clock && 10960 !WARN_ON(pipe_config->shared_dpll)) { 10961 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 10962 pipe_config); 10963 if (ret) 10964 return ret; 10965 } 10966 10967 if (crtc_state->color_mgmt_changed) { 10968 ret = intel_color_check(crtc, crtc_state); 10969 if (ret) 10970 return ret; 10971 10972 /* 10973 * Changing color management on Intel hardware is 10974 * handled as part of planes update. 10975 */ 10976 crtc_state->planes_changed = true; 10977 } 10978 10979 ret = 0; 10980 if (dev_priv->display.compute_pipe_wm) { 10981 ret = dev_priv->display.compute_pipe_wm(pipe_config); 10982 if (ret) { 10983 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); 10984 return ret; 10985 } 10986 } 10987 10988 if (dev_priv->display.compute_intermediate_wm && 10989 !to_intel_atomic_state(state)->skip_intermediate_wm) { 10990 if (WARN_ON(!dev_priv->display.compute_pipe_wm)) 10991 return 0; 10992 10993 /* 10994 * Calculate 'intermediate' watermarks that satisfy both the 10995 * old state and the new state. We can program these 10996 * immediately. 
10997 */ 10998 ret = dev_priv->display.compute_intermediate_wm(dev, 10999 intel_crtc, 11000 pipe_config); 11001 if (ret) { 11002 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 11003 return ret; 11004 } 11005 } else if (dev_priv->display.compute_intermediate_wm) { 11006 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 11007 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal; 11008 } 11009 11010 if (INTEL_GEN(dev_priv) >= 9) { 11011 if (mode_changed) 11012 ret = skl_update_scaler_crtc(pipe_config); 11013 11014 if (!ret) 11015 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, 11016 pipe_config); 11017 } 11018 11019 return ret; 11020 } 11021 11022 static const struct drm_crtc_helper_funcs intel_helper_funcs = { 11023 .atomic_begin = intel_begin_crtc_commit, 11024 .atomic_flush = intel_finish_crtc_commit, 11025 .atomic_check = intel_crtc_atomic_check, 11026 }; 11027 11028 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11029 { 11030 struct intel_connector *connector; 11031 struct drm_connector_list_iter conn_iter; 11032 11033 drm_connector_list_iter_begin(dev, &conn_iter); 11034 for_each_intel_connector_iter(connector, &conn_iter) { 11035 if (connector->base.state->crtc) 11036 drm_connector_unreference(&connector->base); 11037 11038 if (connector->base.encoder) { 11039 connector->base.state->best_encoder = 11040 connector->base.encoder; 11041 connector->base.state->crtc = 11042 connector->base.encoder->crtc; 11043 11044 drm_connector_reference(&connector->base); 11045 } else { 11046 connector->base.state->best_encoder = NULL; 11047 connector->base.state->crtc = NULL; 11048 } 11049 } 11050 drm_connector_list_iter_end(&conn_iter); 11051 } 11052 11053 static void 11054 connected_sink_compute_bpp(struct intel_connector *connector, 11055 struct intel_crtc_state *pipe_config) 11056 { 11057 const struct drm_display_info *info = &connector->base.display_info; 11058 int bpp = pipe_config->pipe_bpp; 11059 11060 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", 11061 connector->base.base.id, 11062 connector->base.name); 11063 11064 /* Don't use an invalid EDID bpc value */ 11065 if (info->bpc != 0 && info->bpc * 3 < bpp) { 11066 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 11067 bpp, info->bpc * 3); 11068 pipe_config->pipe_bpp = info->bpc * 3; 11069 } 11070 11071 /* Clamp bpp to 8 on screens without EDID 1.4 */ 11072 if (info->bpc == 0 && bpp > 24) { 11073 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 11074 bpp); 11075 pipe_config->pipe_bpp = 24; 11076 } 11077 } 11078 11079 static int 11080 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11081 struct intel_crtc_state *pipe_config) 11082 { 11083 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 11084 struct drm_atomic_state *state; 11085 struct drm_connector *connector; 11086 struct drm_connector_state *connector_state; 11087 int bpp, i; 11088 11089 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 11090 IS_CHERRYVIEW(dev_priv))) 11091 bpp = 10*3; 11092 else if (INTEL_GEN(dev_priv) >= 5) 11093 bpp = 12*3; 11094 else 11095 bpp = 8*3; 11096 11097 11098 pipe_config->pipe_bpp = bpp; 11099 11100 state = pipe_config->base.state; 11101 11102 /* Clamp display bpp to EDID value */ 11103 for_each_new_connector_in_state(state, connector, connector_state, i) { 11104 if (connector_state->crtc != &crtc->base) 11105 continue; 11106 11107 connected_sink_compute_bpp(to_intel_connector(connector), 11108 
pipe_config); 11109 } 11110 11111 return bpp; 11112 } 11113 11114 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11115 { 11116 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 11117 "type: 0x%x flags: 0x%x\n", 11118 mode->crtc_clock, 11119 mode->crtc_hdisplay, mode->crtc_hsync_start, 11120 mode->crtc_hsync_end, mode->crtc_htotal, 11121 mode->crtc_vdisplay, mode->crtc_vsync_start, 11122 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 11123 } 11124 11125 static inline void 11126 intel_dump_m_n_config(struct intel_crtc_state *pipe_config, char *id, 11127 unsigned int lane_count, struct intel_link_m_n *m_n) 11128 { 11129 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 11130 id, lane_count, 11131 m_n->gmch_m, m_n->gmch_n, 11132 m_n->link_m, m_n->link_n, m_n->tu); 11133 } 11134 11135 static void intel_dump_pipe_config(struct intel_crtc *crtc, 11136 struct intel_crtc_state *pipe_config, 11137 const char *context) 11138 { 11139 struct drm_device *dev = crtc->base.dev; 11140 struct drm_i915_private *dev_priv = to_i915(dev); 11141 struct drm_plane *plane; 11142 struct intel_plane *intel_plane; 11143 struct intel_plane_state *state; 11144 struct drm_framebuffer *fb; 11145 11146 DRM_DEBUG_KMS("[CRTC:%d:%s]%s\n", 11147 crtc->base.base.id, crtc->base.name, context); 11148 11149 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n", 11150 transcoder_name(pipe_config->cpu_transcoder), 11151 pipe_config->pipe_bpp, pipe_config->dither); 11152 11153 if (pipe_config->has_pch_encoder) 11154 intel_dump_m_n_config(pipe_config, "fdi", 11155 pipe_config->fdi_lanes, 11156 &pipe_config->fdi_m_n); 11157 11158 if (intel_crtc_has_dp_encoder(pipe_config)) { 11159 intel_dump_m_n_config(pipe_config, "dp m_n", 11160 pipe_config->lane_count, &pipe_config->dp_m_n); 11161 if (pipe_config->has_drrs) 11162 intel_dump_m_n_config(pipe_config, "dp m2_n2", 11163 pipe_config->lane_count, 11164 &pipe_config->dp_m2_n2); 11165 } 11166 11167 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n", 11168 pipe_config->has_audio, pipe_config->has_infoframe); 11169 11170 DRM_DEBUG_KMS("requested mode:\n"); 11171 drm_mode_debug_printmodeline(&pipe_config->base.mode); 11172 DRM_DEBUG_KMS("adjusted mode:\n"); 11173 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode); 11174 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode); 11175 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n", 11176 pipe_config->port_clock, 11177 pipe_config->pipe_src_w, pipe_config->pipe_src_h, 11178 pipe_config->pixel_rate); 11179 11180 if (INTEL_GEN(dev_priv) >= 9) 11181 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n", 11182 crtc->num_scalers, 11183 pipe_config->scaler_state.scaler_users, 11184 pipe_config->scaler_state.scaler_id); 11185 11186 if (HAS_GMCH_DISPLAY(dev_priv)) 11187 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n", 11188 pipe_config->gmch_pfit.control, 11189 pipe_config->gmch_pfit.pgm_ratios, 11190 pipe_config->gmch_pfit.lvds_border_bits); 11191 else 11192 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n", 11193 pipe_config->pch_pfit.pos, 11194 pipe_config->pch_pfit.size, 11195 enableddisabled(pipe_config->pch_pfit.enabled)); 11196 11197 DRM_DEBUG_KMS("ips: %i, double wide: %i\n", 11198 pipe_config->ips_enabled, pipe_config->double_wide); 11199 11200 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); 11201 11202 DRM_DEBUG_KMS("planes on this 
crtc\n"); 11203 list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 11204 struct drm_format_name_buf format_name; 11205 intel_plane = to_intel_plane(plane); 11206 if (intel_plane->pipe != crtc->pipe) 11207 continue; 11208 11209 state = to_intel_plane_state(plane->state); 11210 fb = state->base.fb; 11211 if (!fb) { 11212 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n", 11213 plane->base.id, plane->name, state->scaler_id); 11214 continue; 11215 } 11216 11217 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", 11218 plane->base.id, plane->name, 11219 fb->base.id, fb->width, fb->height, 11220 drm_get_format_name(fb->format->format, &format_name)); 11221 if (INTEL_GEN(dev_priv) >= 9) 11222 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", 11223 state->scaler_id, 11224 state->base.src.x1 >> 16, 11225 state->base.src.y1 >> 16, 11226 drm_rect_width(&state->base.src) >> 16, 11227 drm_rect_height(&state->base.src) >> 16, 11228 state->base.dst.x1, state->base.dst.y1, 11229 drm_rect_width(&state->base.dst), 11230 drm_rect_height(&state->base.dst)); 11231 } 11232 } 11233 11234 static bool check_digital_port_conflicts(struct drm_atomic_state *state) 11235 { 11236 struct drm_device *dev = state->dev; 11237 struct drm_connector *connector; 11238 unsigned int used_ports = 0; 11239 unsigned int used_mst_ports = 0; 11240 11241 /* 11242 * Walk the connector list instead of the encoder 11243 * list to detect the problem on ddi platforms 11244 * where there's just one encoder per digital port. 11245 */ 11246 drm_for_each_connector(connector, dev) { 11247 struct drm_connector_state *connector_state; 11248 struct intel_encoder *encoder; 11249 11250 connector_state = drm_atomic_get_existing_connector_state(state, connector); 11251 if (!connector_state) 11252 connector_state = connector->state; 11253 11254 if (!connector_state->best_encoder) 11255 continue; 11256 11257 encoder = to_intel_encoder(connector_state->best_encoder); 11258 11259 WARN_ON(!connector_state->crtc); 11260 11261 switch (encoder->type) { 11262 unsigned int port_mask; 11263 case INTEL_OUTPUT_UNKNOWN: 11264 if (WARN_ON(!HAS_DDI(to_i915(dev)))) 11265 break; 11266 case INTEL_OUTPUT_DP: 11267 case INTEL_OUTPUT_HDMI: 11268 case INTEL_OUTPUT_EDP: 11269 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 11270 11271 /* the same port mustn't appear more than once */ 11272 if (used_ports & port_mask) 11273 return false; 11274 11275 used_ports |= port_mask; 11276 break; 11277 case INTEL_OUTPUT_DP_MST: 11278 used_mst_ports |= 11279 1 << enc_to_mst(&encoder->base)->primary->port; 11280 break; 11281 default: 11282 break; 11283 } 11284 } 11285 11286 /* can't mix MST and SST/HDMI on the same port */ 11287 if (used_ports & used_mst_ports) 11288 return false; 11289 11290 return true; 11291 } 11292 11293 static void 11294 clear_intel_crtc_state(struct intel_crtc_state *crtc_state) 11295 { 11296 struct drm_i915_private *dev_priv = 11297 to_i915(crtc_state->base.crtc->dev); 11298 struct intel_crtc_scaler_state scaler_state; 11299 struct intel_dpll_hw_state dpll_hw_state; 11300 struct intel_shared_dpll *shared_dpll; 11301 struct intel_crtc_wm_state wm_state; 11302 bool force_thru; 11303 11304 /* FIXME: before the switch to atomic started, a new pipe_config was 11305 * kzalloc'd. Code that depends on any field being zero should be 11306 * fixed, so that the crtc_state can be safely duplicated. For now, 11307 * only fields that are know to not cause problems are preserved. 
*/ 11308 11309 scaler_state = crtc_state->scaler_state; 11310 shared_dpll = crtc_state->shared_dpll; 11311 dpll_hw_state = crtc_state->dpll_hw_state; 11312 force_thru = crtc_state->pch_pfit.force_thru; 11313 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11314 wm_state = crtc_state->wm; 11315 11316 /* Keep base drm_crtc_state intact, only clear our extended struct */ 11317 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); 11318 memset(&crtc_state->base + 1, 0, 11319 sizeof(*crtc_state) - sizeof(crtc_state->base)); 11320 11321 crtc_state->scaler_state = scaler_state; 11322 crtc_state->shared_dpll = shared_dpll; 11323 crtc_state->dpll_hw_state = dpll_hw_state; 11324 crtc_state->pch_pfit.force_thru = force_thru; 11325 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11326 crtc_state->wm = wm_state; 11327 } 11328 11329 static int 11330 intel_modeset_pipe_config(struct drm_crtc *crtc, 11331 struct intel_crtc_state *pipe_config) 11332 { 11333 struct drm_atomic_state *state = pipe_config->base.state; 11334 struct intel_encoder *encoder; 11335 struct drm_connector *connector; 11336 struct drm_connector_state *connector_state; 11337 int base_bpp, ret = -EINVAL; 11338 int i; 11339 bool retry = true; 11340 11341 clear_intel_crtc_state(pipe_config); 11342 11343 pipe_config->cpu_transcoder = 11344 (enum transcoder) to_intel_crtc(crtc)->pipe; 11345 11346 /* 11347 * Sanitize sync polarity flags based on requested ones. If neither 11348 * positive or negative polarity is requested, treat this as meaning 11349 * negative polarity. 11350 */ 11351 if (!(pipe_config->base.adjusted_mode.flags & 11352 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 11353 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 11354 11355 if (!(pipe_config->base.adjusted_mode.flags & 11356 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 11357 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 11358 11359 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 11360 pipe_config); 11361 if (base_bpp < 0) 11362 goto fail; 11363 11364 /* 11365 * Determine the real pipe dimensions. Note that stereo modes can 11366 * increase the actual pipe size due to the frame doubling and 11367 * insertion of additional space for blanks between the frame. This 11368 * is stored in the crtc timings. We use the requested mode to do this 11369 * computation to clearly distinguish it from the adjusted mode, which 11370 * can be changed by the connectors in the below retry loop. 11371 */ 11372 drm_mode_get_hv_timing(&pipe_config->base.mode, 11373 &pipe_config->pipe_src_w, 11374 &pipe_config->pipe_src_h); 11375 11376 for_each_new_connector_in_state(state, connector, connector_state, i) { 11377 if (connector_state->crtc != crtc) 11378 continue; 11379 11380 encoder = to_intel_encoder(connector_state->best_encoder); 11381 11382 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) { 11383 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n"); 11384 goto fail; 11385 } 11386 11387 /* 11388 * Determine output_types before calling the .compute_config() 11389 * hooks so that the hooks can use this information safely. 11390 */ 11391 pipe_config->output_types |= 1 << encoder->type; 11392 } 11393 11394 encoder_retry: 11395 /* Ensure the port clock defaults are reset when retrying. */ 11396 pipe_config->port_clock = 0; 11397 pipe_config->pixel_multiplier = 1; 11398 11399 /* Fill in default crtc timings, allow encoders to overwrite them. 
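 * drm_mode_set_crtcinfo() recomputes the crtc_* timing fields from the
 * adjusted mode; CRTC_STEREO_DOUBLE keeps the doubled timings needed by
 * frame-packed stereo modes.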
*/ 11400 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, 11401 CRTC_STEREO_DOUBLE); 11402 11403 /* Pass our mode to the connectors and the CRTC to give them a chance to 11404 * adjust it according to limitations or connector properties, and also 11405 * a chance to reject the mode entirely. 11406 */ 11407 for_each_new_connector_in_state(state, connector, connector_state, i) { 11408 if (connector_state->crtc != crtc) 11409 continue; 11410 11411 encoder = to_intel_encoder(connector_state->best_encoder); 11412 11413 if (!(encoder->compute_config(encoder, pipe_config, connector_state))) { 11414 DRM_DEBUG_KMS("Encoder config failure\n"); 11415 goto fail; 11416 } 11417 } 11418 11419 /* Set default port clock if not overwritten by the encoder. Needs to be 11420 * done afterwards in case the encoder adjusts the mode. */ 11421 if (!pipe_config->port_clock) 11422 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock 11423 * pipe_config->pixel_multiplier; 11424 11425 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config); 11426 if (ret < 0) { 11427 DRM_DEBUG_KMS("CRTC fixup failed\n"); 11428 goto fail; 11429 } 11430 11431 if (ret == RETRY) { 11432 if (WARN(!retry, "loop in pipe configuration computation\n")) { 11433 ret = -EINVAL; 11434 goto fail; 11435 } 11436 11437 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n"); 11438 retry = false; 11439 goto encoder_retry; 11440 } 11441 11442 /* Dithering seems to not pass-through bits correctly when it should, so 11443 * only enable it on 6bpc panels and when its not a compliance 11444 * test requesting 6bpc video pattern. 11445 */ 11446 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) && 11447 !pipe_config->dither_force_disable; 11448 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n", 11449 base_bpp, pipe_config->pipe_bpp, pipe_config->dither); 11450 11451 fail: 11452 return ret; 11453 } 11454 11455 static void 11456 intel_modeset_update_crtc_state(struct drm_atomic_state *state) 11457 { 11458 struct drm_crtc *crtc; 11459 struct drm_crtc_state *new_crtc_state; 11460 int i; 11461 11462 /* Double check state. */ 11463 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 11464 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state); 11465 11466 /* Update hwmode for vblank functions */ 11467 if (new_crtc_state->active) 11468 crtc->hwmode = new_crtc_state->adjusted_mode; 11469 else 11470 crtc->hwmode.crtc_clock = 0; 11471 11472 /* 11473 * Update legacy state to satisfy fbc code. This can 11474 * be removed when fbc uses the atomic state. 
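 * Only the primary plane's fb pointer and the crtc x/y panning offsets are
 * synced here.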
11475 */ 11476 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 11477 struct drm_plane_state *plane_state = crtc->primary->state; 11478 11479 crtc->primary->fb = plane_state->fb; 11480 crtc->x = plane_state->src_x >> 16; 11481 crtc->y = plane_state->src_y >> 16; 11482 } 11483 } 11484 } 11485 11486 static bool intel_fuzzy_clock_check(int clock1, int clock2) 11487 { 11488 int diff; 11489 11490 if (clock1 == clock2) 11491 return true; 11492 11493 if (!clock1 || !clock2) 11494 return false; 11495 11496 diff = abs(clock1 - clock2); 11497 11498 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 11499 return true; 11500 11501 return false; 11502 } 11503 11504 static bool 11505 intel_compare_m_n(unsigned int m, unsigned int n, 11506 unsigned int m2, unsigned int n2, 11507 bool exact) 11508 { 11509 if (m == m2 && n == n2) 11510 return true; 11511 11512 if (exact || !m || !n || !m2 || !n2) 11513 return false; 11514 11515 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 11516 11517 if (n > n2) { 11518 while (n > n2) { 11519 m2 <<= 1; 11520 n2 <<= 1; 11521 } 11522 } else if (n < n2) { 11523 while (n < n2) { 11524 m <<= 1; 11525 n <<= 1; 11526 } 11527 } 11528 11529 if (n != n2) 11530 return false; 11531 11532 return intel_fuzzy_clock_check(m, m2); 11533 } 11534 11535 static bool 11536 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 11537 struct intel_link_m_n *m2_n2, 11538 bool adjust) 11539 { 11540 if (m_n->tu == m2_n2->tu && 11541 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 11542 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 11543 intel_compare_m_n(m_n->link_m, m_n->link_n, 11544 m2_n2->link_m, m2_n2->link_n, !adjust)) { 11545 if (adjust) 11546 *m2_n2 = *m_n; 11547 11548 return true; 11549 } 11550 11551 return false; 11552 } 11553 11554 static void __printf(3, 4) 11555 pipe_config_err(bool adjust, const char *name, const char *format, ...) 
11556 { 11557 char *level; 11558 unsigned int category; 11559 struct va_format vaf; 11560 va_list args; 11561 11562 if (adjust) { 11563 level = KERN_DEBUG; 11564 category = DRM_UT_KMS; 11565 } else { 11566 level = KERN_ERR; 11567 category = DRM_UT_NONE; 11568 } 11569 11570 va_start(args, format); 11571 vaf.fmt = format; 11572 vaf.va = &args; 11573 11574 drm_printk(level, category, "mismatch in %s %pV", name, &vaf); 11575 11576 va_end(args); 11577 } 11578 11579 static bool 11580 intel_pipe_config_compare(struct drm_i915_private *dev_priv, 11581 struct intel_crtc_state *current_config, 11582 struct intel_crtc_state *pipe_config, 11583 bool adjust) 11584 { 11585 bool ret = true; 11586 11587 #define PIPE_CONF_CHECK_X(name) \ 11588 if (current_config->name != pipe_config->name) { \ 11589 pipe_config_err(adjust, __stringify(name), \ 11590 "(expected 0x%08x, found 0x%08x)\n", \ 11591 current_config->name, \ 11592 pipe_config->name); \ 11593 ret = false; \ 11594 } 11595 11596 #define PIPE_CONF_CHECK_I(name) \ 11597 if (current_config->name != pipe_config->name) { \ 11598 pipe_config_err(adjust, __stringify(name), \ 11599 "(expected %i, found %i)\n", \ 11600 current_config->name, \ 11601 pipe_config->name); \ 11602 ret = false; \ 11603 } 11604 11605 #define PIPE_CONF_CHECK_P(name) \ 11606 if (current_config->name != pipe_config->name) { \ 11607 pipe_config_err(adjust, __stringify(name), \ 11608 "(expected %p, found %p)\n", \ 11609 current_config->name, \ 11610 pipe_config->name); \ 11611 ret = false; \ 11612 } 11613 11614 #define PIPE_CONF_CHECK_M_N(name) \ 11615 if (!intel_compare_link_m_n(&current_config->name, \ 11616 &pipe_config->name,\ 11617 adjust)) { \ 11618 pipe_config_err(adjust, __stringify(name), \ 11619 "(expected tu %i gmch %i/%i link %i/%i, " \ 11620 "found tu %i, gmch %i/%i link %i/%i)\n", \ 11621 current_config->name.tu, \ 11622 current_config->name.gmch_m, \ 11623 current_config->name.gmch_n, \ 11624 current_config->name.link_m, \ 11625 current_config->name.link_n, \ 11626 pipe_config->name.tu, \ 11627 pipe_config->name.gmch_m, \ 11628 pipe_config->name.gmch_n, \ 11629 pipe_config->name.link_m, \ 11630 pipe_config->name.link_n); \ 11631 ret = false; \ 11632 } 11633 11634 /* This is required for BDW+ where there is only one set of registers for 11635 * switching between high and low RR. 11636 * This macro can be used whenever a comparison has to be made between one 11637 * hw state and multiple sw state variables.
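 * For example, PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2) below accepts the
 * hw dp_m_n value if it matches either the sw dp_m_n or the sw dp_m2_n2.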
11638 */ 11639 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 11640 if (!intel_compare_link_m_n(&current_config->name, \ 11641 &pipe_config->name, adjust) && \ 11642 !intel_compare_link_m_n(&current_config->alt_name, \ 11643 &pipe_config->name, adjust)) { \ 11644 pipe_config_err(adjust, __stringify(name), \ 11645 "(expected tu %i gmch %i/%i link %i/%i, " \ 11646 "or tu %i gmch %i/%i link %i/%i, " \ 11647 "found tu %i, gmch %i/%i link %i/%i)\n", \ 11648 current_config->name.tu, \ 11649 current_config->name.gmch_m, \ 11650 current_config->name.gmch_n, \ 11651 current_config->name.link_m, \ 11652 current_config->name.link_n, \ 11653 current_config->alt_name.tu, \ 11654 current_config->alt_name.gmch_m, \ 11655 current_config->alt_name.gmch_n, \ 11656 current_config->alt_name.link_m, \ 11657 current_config->alt_name.link_n, \ 11658 pipe_config->name.tu, \ 11659 pipe_config->name.gmch_m, \ 11660 pipe_config->name.gmch_n, \ 11661 pipe_config->name.link_m, \ 11662 pipe_config->name.link_n); \ 11663 ret = false; \ 11664 } 11665 11666 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 11667 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 11668 pipe_config_err(adjust, __stringify(name), \ 11669 "(%x) (expected %i, found %i)\n", \ 11670 (mask), \ 11671 current_config->name & (mask), \ 11672 pipe_config->name & (mask)); \ 11673 ret = false; \ 11674 } 11675 11676 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 11677 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 11678 pipe_config_err(adjust, __stringify(name), \ 11679 "(expected %i, found %i)\n", \ 11680 current_config->name, \ 11681 pipe_config->name); \ 11682 ret = false; \ 11683 } 11684 11685 #define PIPE_CONF_QUIRK(quirk) \ 11686 ((current_config->quirks | pipe_config->quirks) & (quirk)) 11687 11688 PIPE_CONF_CHECK_I(cpu_transcoder); 11689 11690 PIPE_CONF_CHECK_I(has_pch_encoder); 11691 PIPE_CONF_CHECK_I(fdi_lanes); 11692 PIPE_CONF_CHECK_M_N(fdi_m_n); 11693 11694 PIPE_CONF_CHECK_I(lane_count); 11695 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 11696 11697 if (INTEL_GEN(dev_priv) < 8) { 11698 PIPE_CONF_CHECK_M_N(dp_m_n); 11699 11700 if (current_config->has_drrs) 11701 PIPE_CONF_CHECK_M_N(dp_m2_n2); 11702 } else 11703 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 11704 11705 PIPE_CONF_CHECK_X(output_types); 11706 11707 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 11708 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 11709 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 11710 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 11711 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 11712 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 11713 11714 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 11715 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 11716 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 11717 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 11718 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 11719 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 11720 11721 PIPE_CONF_CHECK_I(pixel_multiplier); 11722 PIPE_CONF_CHECK_I(has_hdmi_sink); 11723 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || 11724 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 11725 PIPE_CONF_CHECK_I(limited_color_range); 11726 11727 PIPE_CONF_CHECK_I(hdmi_scrambling); 11728 PIPE_CONF_CHECK_I(hdmi_high_tmds_clock_ratio); 11729 PIPE_CONF_CHECK_I(has_infoframe); 11730 11731 PIPE_CONF_CHECK_I(has_audio); 11732 11733 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11734
DRM_MODE_FLAG_INTERLACE); 11735 11736 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 11737 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11738 DRM_MODE_FLAG_PHSYNC); 11739 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11740 DRM_MODE_FLAG_NHSYNC); 11741 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11742 DRM_MODE_FLAG_PVSYNC); 11743 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 11744 DRM_MODE_FLAG_NVSYNC); 11745 } 11746 11747 PIPE_CONF_CHECK_X(gmch_pfit.control); 11748 /* pfit ratios are autocomputed by the hw on gen4+ */ 11749 if (INTEL_GEN(dev_priv) < 4) 11750 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 11751 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 11752 11753 if (!adjust) { 11754 PIPE_CONF_CHECK_I(pipe_src_w); 11755 PIPE_CONF_CHECK_I(pipe_src_h); 11756 11757 PIPE_CONF_CHECK_I(pch_pfit.enabled); 11758 if (current_config->pch_pfit.enabled) { 11759 PIPE_CONF_CHECK_X(pch_pfit.pos); 11760 PIPE_CONF_CHECK_X(pch_pfit.size); 11761 } 11762 11763 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 11764 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate); 11765 } 11766 11767 /* BDW+ don't expose a synchronous way to read the state */ 11768 if (IS_HASWELL(dev_priv)) 11769 PIPE_CONF_CHECK_I(ips_enabled); 11770 11771 PIPE_CONF_CHECK_I(double_wide); 11772 11773 PIPE_CONF_CHECK_P(shared_dpll); 11774 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 11775 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 11776 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 11777 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 11778 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 11779 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 11780 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 11781 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 11782 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 11783 11784 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 11785 PIPE_CONF_CHECK_X(dsi_pll.div); 11786 11787 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) 11788 PIPE_CONF_CHECK_I(pipe_bpp); 11789 11790 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 11791 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 11792 11793 #undef PIPE_CONF_CHECK_X 11794 #undef PIPE_CONF_CHECK_I 11795 #undef PIPE_CONF_CHECK_P 11796 #undef PIPE_CONF_CHECK_FLAGS 11797 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 11798 #undef PIPE_CONF_QUIRK 11799 11800 return ret; 11801 } 11802 11803 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 11804 const struct intel_crtc_state *pipe_config) 11805 { 11806 if (pipe_config->has_pch_encoder) { 11807 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 11808 &pipe_config->fdi_m_n); 11809 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 11810 11811 /* 11812 * FDI already provided one idea for the dotclock. 11813 * Yell if the encoder disagrees. 
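 * The comparison below is fuzzy (intel_fuzzy_clock_check()) to tolerate
 * rounding in the m/n computation.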
11814 */ 11815 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 11816 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 11817 fdi_dotclock, dotclock); 11818 } 11819 } 11820 11821 static void verify_wm_state(struct drm_crtc *crtc, 11822 struct drm_crtc_state *new_state) 11823 { 11824 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 11825 struct skl_ddb_allocation hw_ddb, *sw_ddb; 11826 struct skl_pipe_wm hw_wm, *sw_wm; 11827 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm; 11828 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry; 11829 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11830 const enum i915_pipe pipe = intel_crtc->pipe; 11831 int plane, level, max_level = ilk_wm_max_level(dev_priv); 11832 11833 if (INTEL_GEN(dev_priv) < 9 || !new_state->active) 11834 return; 11835 11836 skl_pipe_wm_get_hw_state(crtc, &hw_wm); 11837 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal; 11838 11839 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 11840 sw_ddb = &dev_priv->wm.skl_hw.ddb; 11841 11842 /* planes */ 11843 for_each_universal_plane(dev_priv, pipe, plane) { 11844 hw_plane_wm = &hw_wm.planes[plane]; 11845 sw_plane_wm = &sw_wm->planes[plane]; 11846 11847 /* Watermarks */ 11848 for (level = 0; level <= max_level; level++) { 11849 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 11850 &sw_plane_wm->wm[level])) 11851 continue; 11852 11853 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11854 pipe_name(pipe), plane + 1, level, 11855 sw_plane_wm->wm[level].plane_en, 11856 sw_plane_wm->wm[level].plane_res_b, 11857 sw_plane_wm->wm[level].plane_res_l, 11858 hw_plane_wm->wm[level].plane_en, 11859 hw_plane_wm->wm[level].plane_res_b, 11860 hw_plane_wm->wm[level].plane_res_l); 11861 } 11862 11863 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 11864 &sw_plane_wm->trans_wm)) { 11865 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11866 pipe_name(pipe), plane + 1, 11867 sw_plane_wm->trans_wm.plane_en, 11868 sw_plane_wm->trans_wm.plane_res_b, 11869 sw_plane_wm->trans_wm.plane_res_l, 11870 hw_plane_wm->trans_wm.plane_en, 11871 hw_plane_wm->trans_wm.plane_res_b, 11872 hw_plane_wm->trans_wm.plane_res_l); 11873 } 11874 11875 /* DDB */ 11876 hw_ddb_entry = &hw_ddb.plane[pipe][plane]; 11877 sw_ddb_entry = &sw_ddb->plane[pipe][plane]; 11878 11879 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 11880 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n", 11881 pipe_name(pipe), plane + 1, 11882 sw_ddb_entry->start, sw_ddb_entry->end, 11883 hw_ddb_entry->start, hw_ddb_entry->end); 11884 } 11885 } 11886 11887 /* 11888 * cursor 11889 * If the cursor plane isn't active, we may not have updated it's ddb 11890 * allocation. 
In that case since the ddb allocation will be updated 11891 * once the plane becomes visible, we can skip this check 11892 */ 11893 if (intel_crtc->cursor_addr) { 11894 hw_plane_wm = &hw_wm.planes[PLANE_CURSOR]; 11895 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR]; 11896 11897 /* Watermarks */ 11898 for (level = 0; level <= max_level; level++) { 11899 if (skl_wm_level_equals(&hw_plane_wm->wm[level], 11900 &sw_plane_wm->wm[level])) 11901 continue; 11902 11903 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11904 pipe_name(pipe), level, 11905 sw_plane_wm->wm[level].plane_en, 11906 sw_plane_wm->wm[level].plane_res_b, 11907 sw_plane_wm->wm[level].plane_res_l, 11908 hw_plane_wm->wm[level].plane_en, 11909 hw_plane_wm->wm[level].plane_res_b, 11910 hw_plane_wm->wm[level].plane_res_l); 11911 } 11912 11913 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm, 11914 &sw_plane_wm->trans_wm)) { 11915 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n", 11916 pipe_name(pipe), 11917 sw_plane_wm->trans_wm.plane_en, 11918 sw_plane_wm->trans_wm.plane_res_b, 11919 sw_plane_wm->trans_wm.plane_res_l, 11920 hw_plane_wm->trans_wm.plane_en, 11921 hw_plane_wm->trans_wm.plane_res_b, 11922 hw_plane_wm->trans_wm.plane_res_l); 11923 } 11924 11925 /* DDB */ 11926 hw_ddb_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; 11927 sw_ddb_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; 11928 11929 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) { 11930 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n", 11931 pipe_name(pipe), 11932 sw_ddb_entry->start, sw_ddb_entry->end, 11933 hw_ddb_entry->start, hw_ddb_entry->end); 11934 } 11935 } 11936 } 11937 11938 static void 11939 verify_connector_state(struct drm_device *dev, 11940 struct drm_atomic_state *state, 11941 struct drm_crtc *crtc) 11942 { 11943 struct drm_connector *connector; 11944 struct drm_connector_state *new_conn_state; 11945 int i; 11946 11947 for_each_new_connector_in_state(state, connector, new_conn_state, i) { 11948 struct drm_encoder *encoder = connector->encoder; 11949 11950 if (new_conn_state->crtc != crtc) 11951 continue; 11952 11953 intel_connector_verify_state(to_intel_connector(connector)); 11954 11955 I915_STATE_WARN(new_conn_state->best_encoder != encoder, 11956 "connector's atomic encoder doesn't match legacy encoder\n"); 11957 } 11958 } 11959 11960 static void 11961 verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state) 11962 { 11963 struct intel_encoder *encoder; 11964 struct drm_connector *connector; 11965 struct drm_connector_state *old_conn_state, *new_conn_state; 11966 int i; 11967 11968 for_each_intel_encoder(dev, encoder) { 11969 bool enabled = false, found = false; 11970 enum i915_pipe pipe; 11971 11972 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", 11973 encoder->base.base.id, 11974 encoder->base.name); 11975 11976 for_each_oldnew_connector_in_state(state, connector, old_conn_state, 11977 new_conn_state, i) { 11978 if (old_conn_state->best_encoder == &encoder->base) 11979 found = true; 11980 11981 if (new_conn_state->best_encoder != &encoder->base) 11982 continue; 11983 found = enabled = true; 11984 11985 I915_STATE_WARN(new_conn_state->crtc != 11986 encoder->base.crtc, 11987 "connector's crtc doesn't match encoder crtc\n"); 11988 } 11989 11990 if (!found) 11991 continue; 11992 11993 I915_STATE_WARN(!!encoder->base.crtc != enabled, 11994 "encoder's enabled state mismatch " 11995 "(expected %i, found %i)\n", 11996 
!!encoder->base.crtc, enabled); 11997 11998 if (!encoder->base.crtc) { 11999 bool active; 12000 12001 active = encoder->get_hw_state(encoder, &pipe); 12002 I915_STATE_WARN(active, 12003 "encoder detached but still enabled on pipe %c.\n", 12004 pipe_name(pipe)); 12005 } 12006 } 12007 } 12008 12009 static void 12010 verify_crtc_state(struct drm_crtc *crtc, 12011 struct drm_crtc_state *old_crtc_state, 12012 struct drm_crtc_state *new_crtc_state) 12013 { 12014 struct drm_device *dev = crtc->dev; 12015 struct drm_i915_private *dev_priv = to_i915(dev); 12016 struct intel_encoder *encoder; 12017 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12018 struct intel_crtc_state *pipe_config, *sw_config; 12019 struct drm_atomic_state *old_state; 12020 bool active; 12021 12022 old_state = old_crtc_state->state; 12023 __drm_atomic_helper_crtc_destroy_state(old_crtc_state); 12024 pipe_config = to_intel_crtc_state(old_crtc_state); 12025 memset(pipe_config, 0, sizeof(*pipe_config)); 12026 pipe_config->base.crtc = crtc; 12027 pipe_config->base.state = old_state; 12028 12029 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name); 12030 12031 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 12032 12033 /* hw state is inconsistent with the pipe quirk */ 12034 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 12035 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 12036 active = new_crtc_state->active; 12037 12038 I915_STATE_WARN(new_crtc_state->active != active, 12039 "crtc active state doesn't match with hw state " 12040 "(expected %i, found %i)\n", new_crtc_state->active, active); 12041 12042 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active, 12043 "transitional active state does not match atomic hw state " 12044 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active); 12045 12046 for_each_encoder_on_crtc(dev, crtc, encoder) { 12047 enum i915_pipe pipe; 12048 12049 active = encoder->get_hw_state(encoder, &pipe); 12050 I915_STATE_WARN(active != new_crtc_state->active, 12051 "[ENCODER:%i] active %i with crtc active %i\n", 12052 encoder->base.base.id, active, new_crtc_state->active); 12053 12054 I915_STATE_WARN(active && intel_crtc->pipe != pipe, 12055 "Encoder connected to wrong pipe %c\n", 12056 pipe_name(pipe)); 12057 12058 if (active) { 12059 pipe_config->output_types |= 1 << encoder->type; 12060 encoder->get_config(encoder, pipe_config); 12061 } 12062 } 12063 12064 intel_crtc_compute_pixel_rate(pipe_config); 12065 12066 if (!new_crtc_state->active) 12067 return; 12068 12069 intel_pipe_config_sanity_check(dev_priv, pipe_config); 12070 12071 sw_config = to_intel_crtc_state(crtc->state); 12072 if (!intel_pipe_config_compare(dev_priv, sw_config, 12073 pipe_config, false)) { 12074 I915_STATE_WARN(1, "pipe state doesn't match!\n"); 12075 intel_dump_pipe_config(intel_crtc, pipe_config, 12076 "[hw state]"); 12077 intel_dump_pipe_config(intel_crtc, sw_config, 12078 "[sw state]"); 12079 } 12080 } 12081 12082 static void 12083 verify_single_dpll_state(struct drm_i915_private *dev_priv, 12084 struct intel_shared_dpll *pll, 12085 struct drm_crtc *crtc, 12086 struct drm_crtc_state *new_state) 12087 { 12088 struct intel_dpll_hw_state dpll_hw_state; 12089 unsigned crtc_mask; 12090 bool active; 12091 12092 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state)); 12093 12094 DRM_DEBUG_KMS("%s\n", pll->name); 12095 12096 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state); 12097 12098 if (!(pll->flags & 
INTEL_DPLL_ALWAYS_ON)) { 12099 I915_STATE_WARN(!pll->on && pll->active_mask, 12100 "pll in active use but not on in sw tracking\n"); 12101 I915_STATE_WARN(pll->on && !pll->active_mask, 12102 "pll is on but not used by any active crtc\n"); 12103 I915_STATE_WARN(pll->on != active, 12104 "pll on state mismatch (expected %i, found %i)\n", 12105 pll->on, active); 12106 } 12107 12108 if (!crtc) { 12109 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask, 12110 "more active pll users than references: %x vs %x\n", 12111 pll->active_mask, pll->state.crtc_mask); 12112 12113 return; 12114 } 12115 12116 crtc_mask = 1 << drm_crtc_index(crtc); 12117 12118 if (new_state->active) 12119 I915_STATE_WARN(!(pll->active_mask & crtc_mask), 12120 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n", 12121 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 12122 else 12123 I915_STATE_WARN(pll->active_mask & crtc_mask, 12124 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n", 12125 pipe_name(drm_crtc_index(crtc)), pll->active_mask); 12126 12127 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask), 12128 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n", 12129 crtc_mask, pll->state.crtc_mask); 12130 12131 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state, 12132 &dpll_hw_state, 12133 sizeof(dpll_hw_state)), 12134 "pll hw state mismatch\n"); 12135 } 12136 12137 static void 12138 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc, 12139 struct drm_crtc_state *old_crtc_state, 12140 struct drm_crtc_state *new_crtc_state) 12141 { 12142 struct drm_i915_private *dev_priv = to_i915(dev); 12143 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); 12144 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); 12145 12146 if (new_state->shared_dpll) 12147 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state); 12148 12149 if (old_state->shared_dpll && 12150 old_state->shared_dpll != new_state->shared_dpll) { 12151 unsigned crtc_mask = 1 << drm_crtc_index(crtc); 12152 struct intel_shared_dpll *pll = old_state->shared_dpll; 12153 12154 I915_STATE_WARN(pll->active_mask & crtc_mask, 12155 "pll active mismatch (didn't expect pipe %c in active mask)\n", 12156 pipe_name(drm_crtc_index(crtc))); 12157 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask, 12158 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 12159 pipe_name(drm_crtc_index(crtc))); 12160 } 12161 } 12162 12163 static void 12164 intel_modeset_verify_crtc(struct drm_crtc *crtc, 12165 struct drm_atomic_state *state, 12166 struct drm_crtc_state *old_state, 12167 struct drm_crtc_state *new_state) 12168 { 12169 if (!needs_modeset(new_state) && 12170 !to_intel_crtc_state(new_state)->update_pipe) 12171 return; 12172 12173 verify_wm_state(crtc, new_state); 12174 verify_connector_state(crtc->dev, state, crtc); 12175 verify_crtc_state(crtc, old_state, new_state); 12176 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); 12177 } 12178 12179 static void 12180 verify_disabled_dpll_state(struct drm_device *dev) 12181 { 12182 struct drm_i915_private *dev_priv = to_i915(dev); 12183 int i; 12184 12185 for (i = 0; i < dev_priv->num_shared_dpll; i++) 12186 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 12187 } 12188 12189 static void 12190 intel_modeset_verify_disabled(struct drm_device *dev, 12191 struct drm_atomic_state *state) 12192 { 12193 verify_encoder_state(dev, state); 12194 verify_connector_state(dev, 
state, NULL); 12195 verify_disabled_dpll_state(dev); 12196 } 12197 12198 static void update_scanline_offset(struct intel_crtc *crtc) 12199 { 12200 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 12201 12202 /* 12203 * The scanline counter increments at the leading edge of hsync. 12204 * 12205 * On most platforms it starts counting from vtotal-1 on the 12206 * first active line. That means the scanline counter value is 12207 * always one less than what we would expect. Ie. just after 12208 * start of vblank, which also occurs at start of hsync (on the 12209 * last active line), the scanline counter will read vblank_start-1. 12210 * 12211 * On gen2 the scanline counter starts counting from 1 instead 12212 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 12213 * to keep the value positive), instead of adding one. 12214 * 12215 * On HSW+ the behaviour of the scanline counter depends on the output 12216 * type. For DP ports it behaves like most other platforms, but on HDMI 12217 * there's an extra 1 line difference. So we need to add two instead of 12218 * one to the value. 12219 * 12220 * On VLV/CHV DSI the scanline counter would appear to increment 12221 * approx. 1/3 of a scanline before start of vblank. Unfortunately 12222 * that means we can't tell whether we're in vblank or not while 12223 * we're on that particular line. We must still set scanline_offset 12224 * to 1 so that the vblank timestamps come out correct when we query 12225 * the scanline counter from within the vblank interrupt handler. 12226 * However if queried just before the start of vblank we'll get an 12227 * answer that's slightly in the future. 12228 */ 12229 if (IS_GEN2(dev_priv)) { 12230 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 12231 int vtotal; 12232 12233 vtotal = adjusted_mode->crtc_vtotal; 12234 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 12235 vtotal /= 2; 12236 12237 crtc->scanline_offset = vtotal - 1; 12238 } else if (HAS_DDI(dev_priv) && 12239 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) { 12240 crtc->scanline_offset = 2; 12241 } else 12242 crtc->scanline_offset = 1; 12243 } 12244 12245 static void intel_modeset_clear_plls(struct drm_atomic_state *state) 12246 { 12247 struct drm_device *dev = state->dev; 12248 struct drm_i915_private *dev_priv = to_i915(dev); 12249 struct drm_crtc *crtc; 12250 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12251 int i; 12252 12253 if (!dev_priv->display.crtc_compute_clock) 12254 return; 12255 12256 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12257 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12258 struct intel_shared_dpll *old_dpll = 12259 to_intel_crtc_state(old_crtc_state)->shared_dpll; 12260 12261 if (!needs_modeset(new_crtc_state)) 12262 continue; 12263 12264 to_intel_crtc_state(new_crtc_state)->shared_dpll = NULL; 12265 12266 if (!old_dpll) 12267 continue; 12268 12269 intel_release_shared_dpll(old_dpll, intel_crtc, state); 12270 } 12271 } 12272 12273 /* 12274 * This implements the workaround described in the "notes" section of the mode 12275 * set sequence documentation. When going from no pipes or single pipe to 12276 * multiple pipes, and planes are enabled after the pipe, we need to wait at 12277 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 
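 * The helper below only records which pipe has to wait in
 * hsw_workaround_pipe; the vblank waits themselves are expected to be done
 * later, when that crtc's planes are enabled.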
12278 */ 12279 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state) 12280 { 12281 struct drm_crtc_state *crtc_state; 12282 struct intel_crtc *intel_crtc; 12283 struct drm_crtc *crtc; 12284 struct intel_crtc_state *first_crtc_state = NULL; 12285 struct intel_crtc_state *other_crtc_state = NULL; 12286 enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE; 12287 int i; 12288 12289 /* look at all crtc's that are going to be enabled in during modeset */ 12290 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 12291 intel_crtc = to_intel_crtc(crtc); 12292 12293 if (!crtc_state->active || !needs_modeset(crtc_state)) 12294 continue; 12295 12296 if (first_crtc_state) { 12297 other_crtc_state = to_intel_crtc_state(crtc_state); 12298 break; 12299 } else { 12300 first_crtc_state = to_intel_crtc_state(crtc_state); 12301 first_pipe = intel_crtc->pipe; 12302 } 12303 } 12304 12305 /* No workaround needed? */ 12306 if (!first_crtc_state) 12307 return 0; 12308 12309 /* w/a possibly needed, check how many crtc's are already enabled. */ 12310 for_each_intel_crtc(state->dev, intel_crtc) { 12311 struct intel_crtc_state *pipe_config; 12312 12313 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); 12314 if (IS_ERR(pipe_config)) 12315 return PTR_ERR(pipe_config); 12316 12317 pipe_config->hsw_workaround_pipe = INVALID_PIPE; 12318 12319 if (!pipe_config->base.active || 12320 needs_modeset(&pipe_config->base)) 12321 continue; 12322 12323 /* 2 or more enabled crtcs means no need for w/a */ 12324 if (enabled_pipe != INVALID_PIPE) 12325 return 0; 12326 12327 enabled_pipe = intel_crtc->pipe; 12328 } 12329 12330 if (enabled_pipe != INVALID_PIPE) 12331 first_crtc_state->hsw_workaround_pipe = enabled_pipe; 12332 else if (other_crtc_state) 12333 other_crtc_state->hsw_workaround_pipe = first_pipe; 12334 12335 return 0; 12336 } 12337 12338 static int intel_lock_all_pipes(struct drm_atomic_state *state) 12339 { 12340 struct drm_crtc *crtc; 12341 12342 /* Add all pipes to the state */ 12343 for_each_crtc(state->dev, crtc) { 12344 struct drm_crtc_state *crtc_state; 12345 12346 crtc_state = drm_atomic_get_crtc_state(state, crtc); 12347 if (IS_ERR(crtc_state)) 12348 return PTR_ERR(crtc_state); 12349 } 12350 12351 return 0; 12352 } 12353 12354 static int intel_modeset_all_pipes(struct drm_atomic_state *state) 12355 { 12356 struct drm_crtc *crtc; 12357 12358 /* 12359 * Add all pipes to the state, and force 12360 * a modeset on all the active ones. 
12361 */ 12362 for_each_crtc(state->dev, crtc) { 12363 struct drm_crtc_state *crtc_state; 12364 int ret; 12365 12366 crtc_state = drm_atomic_get_crtc_state(state, crtc); 12367 if (IS_ERR(crtc_state)) 12368 return PTR_ERR(crtc_state); 12369 12370 if (!crtc_state->active || needs_modeset(crtc_state)) 12371 continue; 12372 12373 crtc_state->mode_changed = true; 12374 12375 ret = drm_atomic_add_affected_connectors(state, crtc); 12376 if (ret) 12377 return ret; 12378 12379 ret = drm_atomic_add_affected_planes(state, crtc); 12380 if (ret) 12381 return ret; 12382 } 12383 12384 return 0; 12385 } 12386 12387 static int intel_modeset_checks(struct drm_atomic_state *state) 12388 { 12389 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12390 struct drm_i915_private *dev_priv = to_i915(state->dev); 12391 struct drm_crtc *crtc; 12392 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12393 int ret = 0, i; 12394 12395 if (!check_digital_port_conflicts(state)) { 12396 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); 12397 return -EINVAL; 12398 } 12399 12400 intel_state->modeset = true; 12401 intel_state->active_crtcs = dev_priv->active_crtcs; 12402 intel_state->cdclk.logical = dev_priv->cdclk.logical; 12403 intel_state->cdclk.actual = dev_priv->cdclk.actual; 12404 12405 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12406 if (new_crtc_state->active) 12407 intel_state->active_crtcs |= 1 << i; 12408 else 12409 intel_state->active_crtcs &= ~(1 << i); 12410 12411 if (old_crtc_state->active != new_crtc_state->active) 12412 intel_state->active_pipe_changes |= drm_crtc_mask(crtc); 12413 } 12414 12415 /* 12416 * See if the config requires any additional preparation, e.g. 12417 * to adjust global state with pipes off. We need to do this 12418 * here so we can get the modeset_pipe updated config for the new 12419 * mode set on this crtc. For other crtcs we need to use the 12420 * adjusted_mode bits in the crtc directly. 12421 */ 12422 if (dev_priv->display.modeset_calc_cdclk) { 12423 ret = dev_priv->display.modeset_calc_cdclk(state); 12424 if (ret < 0) 12425 return ret; 12426 12427 /* 12428 * Writes to dev_priv->cdclk.logical must protected by 12429 * holding all the crtc locks, even if we don't end up 12430 * touching the hardware 12431 */ 12432 if (!intel_cdclk_state_compare(&dev_priv->cdclk.logical, 12433 &intel_state->cdclk.logical)) { 12434 ret = intel_lock_all_pipes(state); 12435 if (ret < 0) 12436 return ret; 12437 } 12438 12439 /* All pipes must be switched off while we change the cdclk. */ 12440 if (!intel_cdclk_state_compare(&dev_priv->cdclk.actual, 12441 &intel_state->cdclk.actual)) { 12442 ret = intel_modeset_all_pipes(state); 12443 if (ret < 0) 12444 return ret; 12445 } 12446 12447 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", 12448 intel_state->cdclk.logical.cdclk, 12449 intel_state->cdclk.actual.cdclk); 12450 } else { 12451 to_intel_atomic_state(state)->cdclk.logical = dev_priv->cdclk.logical; 12452 } 12453 12454 intel_modeset_clear_plls(state); 12455 12456 if (IS_HASWELL(dev_priv)) 12457 return haswell_mode_set_planes_workaround(state); 12458 12459 return 0; 12460 } 12461 12462 /* 12463 * Handle calculation of various watermark data at the end of the atomic check 12464 * phase. The code here should be run after the per-crtc and per-plane 'check' 12465 * handlers to ensure that all derived state has been updated. 
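 * If the platform provides no compute_global_watermarks() hook there is
 * nothing left to derive and the check succeeds as-is.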
12466 */ 12467 static int calc_watermark_data(struct drm_atomic_state *state) 12468 { 12469 struct drm_device *dev = state->dev; 12470 struct drm_i915_private *dev_priv = to_i915(dev); 12471 12472 /* Is there platform-specific watermark information to calculate? */ 12473 if (dev_priv->display.compute_global_watermarks) 12474 return dev_priv->display.compute_global_watermarks(state); 12475 12476 return 0; 12477 } 12478 12479 /** 12480 * intel_atomic_check - validate state object 12481 * @dev: drm device 12482 * @state: state to validate 12483 */ 12484 static int intel_atomic_check(struct drm_device *dev, 12485 struct drm_atomic_state *state) 12486 { 12487 struct drm_i915_private *dev_priv = to_i915(dev); 12488 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12489 struct drm_crtc *crtc; 12490 struct drm_crtc_state *old_crtc_state, *crtc_state; 12491 int ret, i; 12492 bool any_ms = false; 12493 12494 ret = drm_atomic_helper_check_modeset(dev, state); 12495 if (ret) 12496 return ret; 12497 12498 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, crtc_state, i) { 12499 struct intel_crtc_state *pipe_config = 12500 to_intel_crtc_state(crtc_state); 12501 12502 /* Catch I915_MODE_FLAG_INHERITED */ 12503 if (crtc_state->mode.private_flags != old_crtc_state->mode.private_flags) 12504 crtc_state->mode_changed = true; 12505 12506 if (!needs_modeset(crtc_state)) 12507 continue; 12508 12509 if (!crtc_state->enable) { 12510 any_ms = true; 12511 continue; 12512 } 12513 12514 /* FIXME: For only active_changed we shouldn't need to do any 12515 * state recomputation at all. */ 12516 12517 ret = drm_atomic_add_affected_connectors(state, crtc); 12518 if (ret) 12519 return ret; 12520 12521 ret = intel_modeset_pipe_config(crtc, pipe_config); 12522 if (ret) { 12523 intel_dump_pipe_config(to_intel_crtc(crtc), 12524 pipe_config, "[failed]"); 12525 return ret; 12526 } 12527 12528 if (i915.fastboot && 12529 intel_pipe_config_compare(dev_priv, 12530 to_intel_crtc_state(old_crtc_state), 12531 pipe_config, true)) { 12532 crtc_state->mode_changed = false; 12533 pipe_config->update_pipe = true; 12534 } 12535 12536 if (needs_modeset(crtc_state)) 12537 any_ms = true; 12538 12539 ret = drm_atomic_add_affected_planes(state, crtc); 12540 if (ret) 12541 return ret; 12542 12543 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 12544 needs_modeset(crtc_state) ? 
12545 "[modeset]" : "[fastset]"); 12546 } 12547 12548 if (any_ms) { 12549 ret = intel_modeset_checks(state); 12550 12551 if (ret) 12552 return ret; 12553 } else { 12554 intel_state->cdclk.logical = dev_priv->cdclk.logical; 12555 } 12556 12557 ret = drm_atomic_helper_check_planes(dev, state); 12558 if (ret) 12559 return ret; 12560 12561 intel_fbc_choose_crtc(dev_priv, state); 12562 return calc_watermark_data(state); 12563 } 12564 12565 static int intel_atomic_prepare_commit(struct drm_device *dev, 12566 struct drm_atomic_state *state) 12567 { 12568 struct drm_i915_private *dev_priv = to_i915(dev); 12569 struct drm_crtc_state *crtc_state; 12570 struct drm_crtc *crtc; 12571 int i, ret; 12572 12573 for_each_new_crtc_in_state(state, crtc, crtc_state, i) { 12574 if (state->legacy_cursor_update) 12575 continue; 12576 12577 ret = intel_crtc_wait_for_pending_flips(crtc); 12578 if (ret) 12579 return ret; 12580 12581 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2) 12582 flush_workqueue(dev_priv->wq); 12583 } 12584 12585 ret = mutex_lock_interruptible(&dev->struct_mutex); 12586 if (ret) 12587 return ret; 12588 12589 ret = drm_atomic_helper_prepare_planes(dev, state); 12590 mutex_unlock(&dev->struct_mutex); 12591 12592 return ret; 12593 } 12594 12595 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) 12596 { 12597 struct drm_device *dev = crtc->base.dev; 12598 12599 if (!dev->max_vblank_count) 12600 return drm_accurate_vblank_count(&crtc->base); 12601 12602 return dev->driver->get_vblank_counter(dev, crtc->pipe); 12603 } 12604 12605 static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 12606 struct drm_i915_private *dev_priv, 12607 unsigned crtc_mask) 12608 { 12609 unsigned last_vblank_count[I915_MAX_PIPES]; 12610 enum i915_pipe pipe; 12611 int ret; 12612 12613 if (!crtc_mask) 12614 return; 12615 12616 for_each_pipe(dev_priv, pipe) { 12617 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, 12618 pipe); 12619 12620 if (!((1 << pipe) & crtc_mask)) 12621 continue; 12622 12623 ret = drm_crtc_vblank_get(&crtc->base); 12624 if (WARN_ON(ret != 0)) { 12625 crtc_mask &= ~(1 << pipe); 12626 continue; 12627 } 12628 12629 last_vblank_count[pipe] = drm_crtc_vblank_count(&crtc->base); 12630 } 12631 12632 for_each_pipe(dev_priv, pipe) { 12633 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, 12634 pipe); 12635 long lret; 12636 12637 if (!((1 << pipe) & crtc_mask)) 12638 continue; 12639 12640 lret = wait_event_timeout(dev->vblank[pipe].queue, 12641 last_vblank_count[pipe] != 12642 drm_crtc_vblank_count(&crtc->base), 12643 msecs_to_jiffies(50)); 12644 12645 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe)); 12646 12647 drm_crtc_vblank_put(&crtc->base); 12648 } 12649 } 12650 12651 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state) 12652 { 12653 /* fb updated, need to unpin old fb */ 12654 if (crtc_state->fb_changed) 12655 return true; 12656 12657 /* wm changes, need vblank before final wm's */ 12658 if (crtc_state->update_wm_post) 12659 return true; 12660 12661 if (crtc_state->wm.need_postvbl_update) 12662 return true; 12663 12664 return false; 12665 } 12666 12667 static void intel_update_crtc(struct drm_crtc *crtc, 12668 struct drm_atomic_state *state, 12669 struct drm_crtc_state *old_crtc_state, 12670 struct drm_crtc_state *new_crtc_state, 12671 unsigned int *crtc_vblank_mask) 12672 { 12673 struct drm_device *dev = crtc->dev; 12674 struct drm_i915_private *dev_priv = to_i915(dev); 12675 struct intel_crtc *intel_crtc = 
to_intel_crtc(crtc); 12676 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state); 12677 bool modeset = needs_modeset(new_crtc_state); 12678 12679 if (modeset) { 12680 update_scanline_offset(intel_crtc); 12681 dev_priv->display.crtc_enable(pipe_config, state); 12682 } else { 12683 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), 12684 pipe_config); 12685 } 12686 12687 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 12688 intel_fbc_enable( 12689 intel_crtc, pipe_config, 12690 to_intel_plane_state(crtc->primary->state)); 12691 } 12692 12693 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); 12694 12695 if (needs_vblank_wait(pipe_config)) 12696 *crtc_vblank_mask |= drm_crtc_mask(crtc); 12697 } 12698 12699 static void intel_update_crtcs(struct drm_atomic_state *state, 12700 unsigned int *crtc_vblank_mask) 12701 { 12702 struct drm_crtc *crtc; 12703 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12704 int i; 12705 12706 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12707 if (!new_crtc_state->active) 12708 continue; 12709 12710 intel_update_crtc(crtc, state, old_crtc_state, 12711 new_crtc_state, crtc_vblank_mask); 12712 } 12713 } 12714 12715 static void skl_update_crtcs(struct drm_atomic_state *state, 12716 unsigned int *crtc_vblank_mask) 12717 { 12718 struct drm_i915_private *dev_priv = to_i915(state->dev); 12719 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12720 struct drm_crtc *crtc; 12721 struct intel_crtc *intel_crtc; 12722 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12723 struct intel_crtc_state *cstate; 12724 unsigned int updated = 0; 12725 bool progress; 12726 enum i915_pipe pipe; 12727 int i; 12728 12729 const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; 12730 12731 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) 12732 /* ignore allocations for crtc's that have been turned off. */ 12733 if (new_crtc_state->active) 12734 entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; 12735 12736 /* 12737 * Whenever the number of active pipes changes, we need to make sure we 12738 * update the pipes in the right order so that their ddb allocations 12739 * never overlap with eachother inbetween CRTC updates. Otherwise we'll 12740 * cause pipe underruns and other bad stuff. 12741 */ 12742 do { 12743 progress = false; 12744 12745 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12746 bool vbl_wait = false; 12747 unsigned int cmask = drm_crtc_mask(crtc); 12748 12749 intel_crtc = to_intel_crtc(crtc); 12750 cstate = to_intel_crtc_state(crtc->state); 12751 pipe = intel_crtc->pipe; 12752 12753 if (updated & cmask || !cstate->base.active) 12754 continue; 12755 12756 if (skl_ddb_allocation_overlaps(entries, &cstate->wm.skl.ddb, i)) 12757 continue; 12758 12759 updated |= cmask; 12760 entries[i] = &cstate->wm.skl.ddb; 12761 12762 /* 12763 * If this is an already active pipe, it's DDB changed, 12764 * and this isn't the last pipe that needs updating 12765 * then we need to wait for a vblank to pass for the 12766 * new ddb allocation to take effect. 
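 * In other words, a pipe whose ddb allocation shrinks must be updated (and
 * a vblank waited) before another pipe may grow into the space it frees;
 * the skl_ddb_allocation_overlaps() check above enforces that ordering.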
12767 */ 12768 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb, 12769 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) && 12770 !new_crtc_state->active_changed && 12771 intel_state->wm_results.dirty_pipes != updated) 12772 vbl_wait = true; 12773 12774 intel_update_crtc(crtc, state, old_crtc_state, 12775 new_crtc_state, crtc_vblank_mask); 12776 12777 if (vbl_wait) 12778 intel_wait_for_vblank(dev_priv, pipe); 12779 12780 progress = true; 12781 } 12782 } while (progress); 12783 } 12784 12785 static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) 12786 { 12787 struct intel_atomic_state *state, *next; 12788 struct llist_node *freed; 12789 12790 freed = llist_del_all(&dev_priv->atomic_helper.free_list); 12791 llist_for_each_entry_safe(state, next, freed, freed) 12792 drm_atomic_state_put(&state->base); 12793 } 12794 12795 static void intel_atomic_helper_free_state_worker(struct work_struct *work) 12796 { 12797 struct drm_i915_private *dev_priv = 12798 container_of(work, typeof(*dev_priv), atomic_helper.free_work); 12799 12800 intel_atomic_helper_free_state(dev_priv); 12801 } 12802 12803 static void intel_atomic_commit_tail(struct drm_atomic_state *state) 12804 { 12805 struct drm_device *dev = state->dev; 12806 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 12807 struct drm_i915_private *dev_priv = to_i915(dev); 12808 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 12809 struct drm_crtc *crtc; 12810 struct intel_crtc_state *intel_cstate; 12811 bool hw_check = intel_state->modeset; 12812 u64 put_domains[I915_MAX_PIPES] = {}; 12813 unsigned crtc_vblank_mask = 0; 12814 int i; 12815 12816 drm_atomic_helper_wait_for_dependencies(state); 12817 12818 if (intel_state->modeset) 12819 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); 12820 12821 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12822 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12823 12824 if (needs_modeset(new_crtc_state) || 12825 to_intel_crtc_state(new_crtc_state)->update_pipe) { 12826 hw_check = true; 12827 12828 put_domains[to_intel_crtc(crtc)->pipe] = 12829 modeset_get_crtc_power_domains(crtc, 12830 to_intel_crtc_state(new_crtc_state)); 12831 } 12832 12833 if (!needs_modeset(new_crtc_state)) 12834 continue; 12835 12836 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state), 12837 to_intel_crtc_state(new_crtc_state)); 12838 12839 if (old_crtc_state->active) { 12840 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask); 12841 dev_priv->display.crtc_disable(to_intel_crtc_state(old_crtc_state), state); 12842 intel_crtc->active = false; 12843 intel_fbc_disable(intel_crtc); 12844 intel_disable_shared_dpll(intel_crtc); 12845 12846 /* 12847 * Underruns don't always raise 12848 * interrupts, so check manually. 12849 */ 12850 intel_check_cpu_fifo_underruns(dev_priv); 12851 intel_check_pch_fifo_underruns(dev_priv); 12852 12853 if (!crtc->state->active) { 12854 /* 12855 * Make sure we don't call initial_watermarks 12856 * for ILK-style watermark updates. 12857 * 12858 * No clue what this is supposed to achieve. 12859 */ 12860 if (INTEL_GEN(dev_priv) >= 9) 12861 dev_priv->display.initial_watermarks(intel_state, 12862 to_intel_crtc_state(crtc->state)); 12863 } 12864 } 12865 } 12866 12867 /* Only after disabling all output pipelines that will be changed can we 12868 * update the the output configuration. 
*/ 12869 intel_modeset_update_crtc_state(state); 12870 12871 if (intel_state->modeset) { 12872 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 12873 12874 intel_set_cdclk(dev_priv, &dev_priv->cdclk.actual); 12875 12876 /* 12877 * SKL workaround: bspec recommends we disable the SAGV when we 12878 * have more than one pipe enabled 12879 */ 12880 if (!intel_can_enable_sagv(state)) 12881 intel_disable_sagv(dev_priv); 12882 12883 intel_modeset_verify_disabled(dev, state); 12884 } 12885 12886 /* Complete the events for pipes that have now been disabled */ 12887 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12888 bool modeset = needs_modeset(new_crtc_state); 12889 12890 /* Complete events for now disabled pipes here. */ 12891 if (modeset && !new_crtc_state->active && new_crtc_state->event) { 12892 spin_lock_irq(&dev->event_lock); 12893 drm_crtc_send_vblank_event(crtc, new_crtc_state->event); 12894 spin_unlock_irq(&dev->event_lock); 12895 12896 new_crtc_state->event = NULL; 12897 } 12898 } 12899 12900 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 12901 dev_priv->display.update_crtcs(state, &crtc_vblank_mask); 12902 12903 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here 12904 * already, but still need the state for the delayed optimization. To 12905 * fix this: 12906 * - wrap the optimization/post_plane_update stuff into a per-crtc work. 12907 * - schedule that vblank worker _before_ calling hw_done 12908 * - at the start of commit_tail, cancel it _synchronously 12909 * - switch over to the vblank wait helper in the core after that since 12910 * we don't need our special handling any more. 12911 */ 12912 if (!state->legacy_cursor_update) 12913 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); 12914 12915 /* 12916 * Now that the vblank has passed, we can go ahead and program the 12917 * optimal watermarks on platforms that need two-step watermark 12918 * programming. 12919 * 12920 * TODO: Move this (and other cleanup) to an async worker eventually. 12921 */ 12922 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { 12923 intel_cstate = to_intel_crtc_state(new_crtc_state); 12924 12925 if (dev_priv->display.optimize_watermarks) 12926 dev_priv->display.optimize_watermarks(intel_state, 12927 intel_cstate); 12928 } 12929 12930 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 12931 intel_post_plane_update(to_intel_crtc_state(old_crtc_state)); 12932 12933 if (put_domains[i]) 12934 modeset_put_power_domains(dev_priv, put_domains[i]); 12935 12936 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); 12937 } 12938 12939 if (intel_state->modeset && intel_can_enable_sagv(state)) 12940 intel_enable_sagv(dev_priv); 12941 12942 drm_atomic_helper_commit_hw_done(state); 12943 12944 if (intel_state->modeset) 12945 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 12946 12947 mutex_lock(&dev->struct_mutex); 12948 drm_atomic_helper_cleanup_planes(dev, state); 12949 mutex_unlock(&dev->struct_mutex); 12950 12951 drm_atomic_helper_commit_cleanup_done(state); 12952 12953 drm_atomic_state_put(state); 12954 12955 /* As one of the primary mmio accessors, KMS has a high likelihood 12956 * of triggering bugs in unclaimed access. After we finish 12957 * modesetting, see if an error has been flagged, and if so 12958 * enable debugging for the next modeset - and hope we catch 12959 * the culprit. 12960 * 12961 * XXX note that we assume display power is on at this point.
12962 * This might hold true now but we need to add pm helper to check 12963 * unclaimed only when the hardware is on, as atomic commits 12964 * can happen also when the device is completely off. 12965 */ 12966 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 12967 12968 intel_atomic_helper_free_state(dev_priv); 12969 } 12970 12971 static void intel_atomic_commit_work(struct work_struct *work) 12972 { 12973 struct drm_atomic_state *state = 12974 container_of(work, struct drm_atomic_state, commit_work); 12975 12976 intel_atomic_commit_tail(state); 12977 } 12978 12979 static int __i915_sw_fence_call 12980 intel_atomic_commit_ready(struct i915_sw_fence *fence, 12981 enum i915_sw_fence_notify notify) 12982 { 12983 struct intel_atomic_state *state = 12984 container_of(fence, struct intel_atomic_state, commit_ready); 12985 12986 switch (notify) { 12987 case FENCE_COMPLETE: 12988 if (state->base.commit_work.func) 12989 queue_work(system_unbound_wq, &state->base.commit_work); 12990 break; 12991 12992 case FENCE_FREE: 12993 { 12994 struct intel_atomic_helper *helper = 12995 &to_i915(state->base.dev)->atomic_helper; 12996 12997 if (llist_add(&state->freed, &helper->free_list)) 12998 schedule_work(&helper->free_work); 12999 break; 13000 } 13001 } 13002 13003 return NOTIFY_DONE; 13004 } 13005 13006 static void intel_atomic_track_fbs(struct drm_atomic_state *state) 13007 { 13008 struct drm_plane_state *old_plane_state, *new_plane_state; 13009 struct drm_plane *plane; 13010 int i; 13011 13012 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) 13013 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb), 13014 intel_fb_obj(new_plane_state->fb), 13015 to_intel_plane(plane)->frontbuffer_bit); 13016 } 13017 13018 /** 13019 * intel_atomic_commit - commit validated state object 13020 * @dev: DRM device 13021 * @state: the top-level driver state object 13022 * @nonblock: nonblocking commit 13023 * 13024 * This function commits a top-level state object that has been validated 13025 * with drm_atomic_helper_check(). 13026 * 13027 * RETURNS 13028 * Zero for success or -errno. 13029 */ 13030 static int intel_atomic_commit(struct drm_device *dev, 13031 struct drm_atomic_state *state, 13032 bool nonblock) 13033 { 13034 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13035 struct drm_i915_private *dev_priv = to_i915(dev); 13036 int ret = 0; 13037 13038 ret = drm_atomic_helper_setup_commit(state, nonblock); 13039 if (ret) 13040 return ret; 13041 13042 drm_atomic_state_get(state); 13043 i915_sw_fence_init(&intel_state->commit_ready, 13044 intel_atomic_commit_ready); 13045 13046 ret = intel_atomic_prepare_commit(dev, state); 13047 if (ret) { 13048 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 13049 i915_sw_fence_commit(&intel_state->commit_ready); 13050 return ret; 13051 } 13052 13053 /* 13054 * The intel_legacy_cursor_update() fast path takes care 13055 * of avoiding the vblank waits for simple cursor 13056 * movement and flips. For cursor on/off and size changes, 13057 * we want to perform the vblank waits so that watermark 13058 * updates happen during the correct frames. Gen9+ have 13059 * double buffered watermarks and so shouldn't need this. 13060 * 13061 * Do this after drm_atomic_helper_setup_commit() and 13062 * intel_atomic_prepare_commit() because we still want 13063 * to skip the flip and fb cleanup waits. Although that 13064 * does risk yanking the mapping from under the display 13065 * engine. 
13066 * 13067 * FIXME doing watermarks and fb cleanup from a vblank worker 13068 * (assuming we had any) would solve these problems. 13069 */ 13070 if (INTEL_GEN(dev_priv) < 9) 13071 state->legacy_cursor_update = false; 13072 13073 drm_atomic_helper_swap_state(state, true); 13074 dev_priv->wm.distrust_bios_wm = false; 13075 intel_shared_dpll_swap_state(state); 13076 intel_atomic_track_fbs(state); 13077 13078 if (intel_state->modeset) { 13079 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, 13080 sizeof(intel_state->min_pixclk)); 13081 dev_priv->active_crtcs = intel_state->active_crtcs; 13082 dev_priv->cdclk.logical = intel_state->cdclk.logical; 13083 dev_priv->cdclk.actual = intel_state->cdclk.actual; 13084 } 13085 13086 drm_atomic_state_get(state); 13087 INIT_WORK(&state->commit_work, 13088 nonblock ? intel_atomic_commit_work : NULL); 13089 13090 i915_sw_fence_commit(&intel_state->commit_ready); 13091 if (!nonblock) { 13092 i915_sw_fence_wait(&intel_state->commit_ready); 13093 intel_atomic_commit_tail(state); 13094 } 13095 13096 return 0; 13097 } 13098 13099 void intel_crtc_restore_mode(struct drm_crtc *crtc) 13100 { 13101 struct drm_device *dev = crtc->dev; 13102 struct drm_atomic_state *state; 13103 struct drm_crtc_state *crtc_state; 13104 int ret; 13105 13106 state = drm_atomic_state_alloc(dev); 13107 if (!state) { 13108 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory", 13109 crtc->base.id, crtc->name); 13110 return; 13111 } 13112 13113 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx; 13114 13115 retry: 13116 crtc_state = drm_atomic_get_crtc_state(state, crtc); 13117 ret = PTR_ERR_OR_ZERO(crtc_state); 13118 if (!ret) { 13119 if (!crtc_state->active) 13120 goto out; 13121 13122 crtc_state->mode_changed = true; 13123 ret = drm_atomic_commit(state); 13124 } 13125 13126 if (ret == -EDEADLK) { 13127 drm_atomic_state_clear(state); 13128 drm_modeset_backoff(state->acquire_ctx); 13129 goto retry; 13130 } 13131 13132 out: 13133 drm_atomic_state_put(state); 13134 } 13135 13136 static const struct drm_crtc_funcs intel_crtc_funcs = { 13137 .gamma_set = drm_atomic_helper_legacy_gamma_set, 13138 .set_config = drm_atomic_helper_set_config, 13139 .set_property = drm_atomic_helper_crtc_set_property, 13140 .destroy = intel_crtc_destroy, 13141 .page_flip = drm_atomic_helper_page_flip, 13142 .atomic_duplicate_state = intel_crtc_duplicate_state, 13143 .atomic_destroy_state = intel_crtc_destroy_state, 13144 .set_crc_source = intel_crtc_set_crc_source, 13145 }; 13146 13147 /** 13148 * intel_prepare_plane_fb - Prepare fb for usage on plane 13149 * @plane: drm plane to prepare for 13150 * @fb: framebuffer to prepare for presentation 13151 * 13152 * Prepares a framebuffer for usage on a display plane. Generally this 13153 * involves pinning the underlying object and updating the frontbuffer tracking 13154 * bits. Some older platforms need special physical address handling for 13155 * cursor planes. 13156 * 13157 * Must be called with struct_mutex held. 13158 * 13159 * Returns 0 on success, negative error code on failure. 
13160 */ 13161 int 13162 intel_prepare_plane_fb(struct drm_plane *plane, 13163 struct drm_plane_state *new_state) 13164 { 13165 struct intel_atomic_state *intel_state = 13166 to_intel_atomic_state(new_state->state); 13167 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13168 struct drm_framebuffer *fb = new_state->fb; 13169 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13170 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 13171 int ret; 13172 13173 if (obj) { 13174 if (plane->type == DRM_PLANE_TYPE_CURSOR && 13175 INTEL_INFO(dev_priv)->cursor_needs_physical) { 13176 const int align = IS_I830(dev_priv) ? 16 * 1024 : 256; 13177 13178 ret = i915_gem_object_attach_phys(obj, align); 13179 if (ret) { 13180 DRM_DEBUG_KMS("failed to attach phys object\n"); 13181 return ret; 13182 } 13183 } else { 13184 struct i915_vma *vma; 13185 13186 vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); 13187 if (IS_ERR(vma)) { 13188 DRM_DEBUG_KMS("failed to pin object\n"); 13189 return PTR_ERR(vma); 13190 } 13191 13192 to_intel_plane_state(new_state)->vma = vma; 13193 } 13194 } 13195 13196 if (!obj && !old_obj) 13197 return 0; 13198 13199 if (old_obj) { 13200 struct drm_crtc_state *crtc_state = 13201 drm_atomic_get_existing_crtc_state(new_state->state, 13202 plane->state->crtc); 13203 13204 /* Big Hammer, we also need to ensure that any pending 13205 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 13206 * current scanout is retired before unpinning the old 13207 * framebuffer. Note that we rely on userspace rendering 13208 * into the buffer attached to the pipe they are waiting 13209 * on. If not, userspace generates a GPU hang with IPEHR 13210 * point to the MI_WAIT_FOR_EVENT. 13211 * 13212 * This should only fail upon a hung GPU, in which case we 13213 * can safely continue. 13214 */ 13215 if (needs_modeset(crtc_state)) { 13216 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 13217 old_obj->resv, NULL, 13218 false, 0, 13219 GFP_KERNEL); 13220 if (ret < 0) 13221 return ret; 13222 } 13223 } 13224 13225 if (new_state->fence) { /* explicit fencing */ 13226 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, 13227 new_state->fence, 13228 I915_FENCE_TIMEOUT, 13229 GFP_KERNEL); 13230 if (ret < 0) 13231 return ret; 13232 } 13233 13234 if (!obj) 13235 return 0; 13236 13237 if (!new_state->fence) { /* implicit fencing */ 13238 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, 13239 obj->resv, NULL, 13240 false, I915_FENCE_TIMEOUT, 13241 GFP_KERNEL); 13242 if (ret < 0) 13243 return ret; 13244 13245 i915_gem_object_wait_priority(obj, 0, I915_PRIORITY_DISPLAY); 13246 } 13247 13248 return 0; 13249 } 13250 13251 /** 13252 * intel_cleanup_plane_fb - Cleans up an fb after plane use 13253 * @plane: drm plane to clean up for 13254 * @fb: old framebuffer that was on plane 13255 * 13256 * Cleans up a framebuffer that has just been removed from a plane. 13257 * 13258 * Must be called with struct_mutex held. 13259 */ 13260 void 13261 intel_cleanup_plane_fb(struct drm_plane *plane, 13262 struct drm_plane_state *old_state) 13263 { 13264 struct i915_vma *vma; 13265 13266 /* Should only be called after a successful intel_prepare_plane_fb()! 
*/ 13267 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); 13268 if (vma) 13269 intel_unpin_fb_vma(vma); 13270 } 13271 13272 int 13273 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 13274 { 13275 struct drm_i915_private *dev_priv; 13276 int max_scale; 13277 int crtc_clock, max_dotclk; 13278 13279 if (!intel_crtc || !crtc_state->base.enable) 13280 return DRM_PLANE_HELPER_NO_SCALING; 13281 13282 dev_priv = to_i915(intel_crtc->base.dev); 13283 13284 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13285 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; 13286 13287 if (IS_GEMINILAKE(dev_priv)) 13288 max_dotclk *= 2; 13289 13290 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) 13291 return DRM_PLANE_HELPER_NO_SCALING; 13292 13293 /* 13294 * skl max scale is lower of: 13295 * close to 3 but not 3, -1 is for that purpose 13296 * or 13297 * cdclk/crtc_clock 13298 */ 13299 max_scale = min((1 << 16) * 3 - 1, 13300 (1 << 8) * ((max_dotclk << 8) / crtc_clock)); 13301 13302 return max_scale; 13303 } 13304 13305 static int 13306 intel_check_primary_plane(struct drm_plane *plane, 13307 struct intel_crtc_state *crtc_state, 13308 struct intel_plane_state *state) 13309 { 13310 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13311 struct drm_crtc *crtc = state->base.crtc; 13312 int min_scale = DRM_PLANE_HELPER_NO_SCALING; 13313 int max_scale = DRM_PLANE_HELPER_NO_SCALING; 13314 bool can_position = false; 13315 int ret; 13316 13317 if (INTEL_GEN(dev_priv) >= 9) { 13318 /* use scaler when colorkey is not required */ 13319 if (state->ckey.flags == I915_SET_COLORKEY_NONE) { 13320 min_scale = 1; 13321 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state); 13322 } 13323 can_position = true; 13324 } 13325 13326 ret = drm_plane_helper_check_state(&state->base, 13327 &state->clip, 13328 min_scale, max_scale, 13329 can_position, true); 13330 if (ret) 13331 return ret; 13332 13333 if (!state->base.fb) 13334 return 0; 13335 13336 if (INTEL_GEN(dev_priv) >= 9) { 13337 ret = skl_check_plane_surface(state); 13338 if (ret) 13339 return ret; 13340 13341 state->ctl = skl_plane_ctl(crtc_state, state); 13342 } else { 13343 ret = i9xx_check_plane_surface(state); 13344 if (ret) 13345 return ret; 13346 13347 state->ctl = i9xx_plane_ctl(crtc_state, state); 13348 } 13349 13350 return 0; 13351 } 13352 13353 static void intel_begin_crtc_commit(struct drm_crtc *crtc, 13354 struct drm_crtc_state *old_crtc_state) 13355 { 13356 struct drm_device *dev = crtc->dev; 13357 struct drm_i915_private *dev_priv = to_i915(dev); 13358 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13359 struct intel_crtc_state *intel_cstate = 13360 to_intel_crtc_state(crtc->state); 13361 struct intel_crtc_state *old_intel_cstate = 13362 to_intel_crtc_state(old_crtc_state); 13363 struct intel_atomic_state *old_intel_state = 13364 to_intel_atomic_state(old_crtc_state->state); 13365 bool modeset = needs_modeset(crtc->state); 13366 13367 if (!modeset && 13368 (intel_cstate->base.color_mgmt_changed || 13369 intel_cstate->update_pipe)) { 13370 intel_color_set_csc(crtc->state); 13371 intel_color_load_luts(crtc->state); 13372 } 13373 13374 /* Perform vblank evasion around commit operation */ 13375 intel_pipe_update_start(intel_crtc); 13376 13377 if (modeset) 13378 goto out; 13379 13380 if (intel_cstate->update_pipe) 13381 intel_update_pipe_config(intel_crtc, old_intel_cstate); 13382 else if (INTEL_GEN(dev_priv) >= 9) 13383 skl_detach_scalers(intel_crtc); 13384 
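	/*
	 * Both the full modeset and the fastset paths converge here: the
	 * watermark update below runs in either case, inside the vblank
	 * evasion window opened by intel_pipe_update_start() above.
	 */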
13385 out: 13386 if (dev_priv->display.atomic_update_watermarks) 13387 dev_priv->display.atomic_update_watermarks(old_intel_state, 13388 intel_cstate); 13389 } 13390 13391 static void intel_finish_crtc_commit(struct drm_crtc *crtc, 13392 struct drm_crtc_state *old_crtc_state) 13393 { 13394 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13395 13396 intel_pipe_update_end(intel_crtc, NULL); 13397 } 13398 13399 /** 13400 * intel_plane_destroy - destroy a plane 13401 * @plane: plane to destroy 13402 * 13403 * Common destruction function for all types of planes (primary, cursor, 13404 * sprite). 13405 */ 13406 void intel_plane_destroy(struct drm_plane *plane) 13407 { 13408 drm_plane_cleanup(plane); 13409 kfree(to_intel_plane(plane)); 13410 } 13411 13412 const struct drm_plane_funcs intel_plane_funcs = { 13413 .update_plane = drm_atomic_helper_update_plane, 13414 .disable_plane = drm_atomic_helper_disable_plane, 13415 .destroy = intel_plane_destroy, 13416 .set_property = drm_atomic_helper_plane_set_property, 13417 .atomic_get_property = intel_plane_atomic_get_property, 13418 .atomic_set_property = intel_plane_atomic_set_property, 13419 .atomic_duplicate_state = intel_plane_duplicate_state, 13420 .atomic_destroy_state = intel_plane_destroy_state, 13421 }; 13422 13423 static int 13424 intel_legacy_cursor_update(struct drm_plane *plane, 13425 struct drm_crtc *crtc, 13426 struct drm_framebuffer *fb, 13427 int crtc_x, int crtc_y, 13428 unsigned int crtc_w, unsigned int crtc_h, 13429 uint32_t src_x, uint32_t src_y, 13430 uint32_t src_w, uint32_t src_h, 13431 struct drm_modeset_acquire_ctx *ctx) 13432 { 13433 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 13434 int ret; 13435 struct drm_plane_state *old_plane_state, *new_plane_state; 13436 struct intel_plane *intel_plane = to_intel_plane(plane); 13437 struct drm_framebuffer *old_fb; 13438 struct drm_crtc_state *crtc_state = crtc->state; 13439 struct i915_vma *old_vma; 13440 13441 /* 13442 * When crtc is inactive or there is a modeset pending, 13443 * wait for it to complete in the slowpath 13444 */ 13445 if (!crtc_state->active || needs_modeset(crtc_state) || 13446 to_intel_crtc_state(crtc_state)->update_pipe) 13447 goto slow; 13448 13449 old_plane_state = plane->state; 13450 13451 /* 13452 * If any parameters change that may affect watermarks, 13453 * take the slowpath. Only changing fb or position should be 13454 * in the fastpath. 
13455 */ 13456 if (old_plane_state->crtc != crtc || 13457 old_plane_state->src_w != src_w || 13458 old_plane_state->src_h != src_h || 13459 old_plane_state->crtc_w != crtc_w || 13460 old_plane_state->crtc_h != crtc_h || 13461 !old_plane_state->fb != !fb) 13462 goto slow; 13463 13464 new_plane_state = intel_plane_duplicate_state(plane); 13465 if (!new_plane_state) 13466 return -ENOMEM; 13467 13468 drm_atomic_set_fb_for_plane(new_plane_state, fb); 13469 13470 new_plane_state->src_x = src_x; 13471 new_plane_state->src_y = src_y; 13472 new_plane_state->src_w = src_w; 13473 new_plane_state->src_h = src_h; 13474 new_plane_state->crtc_x = crtc_x; 13475 new_plane_state->crtc_y = crtc_y; 13476 new_plane_state->crtc_w = crtc_w; 13477 new_plane_state->crtc_h = crtc_h; 13478 13479 ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), 13480 to_intel_plane_state(new_plane_state)); 13481 if (ret) 13482 goto out_free; 13483 13484 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); 13485 if (ret) 13486 goto out_free; 13487 13488 if (INTEL_INFO(dev_priv)->cursor_needs_physical) { 13489 int align = IS_I830(dev_priv) ? 16 * 1024 : 256; 13490 13491 ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); 13492 if (ret) { 13493 DRM_DEBUG_KMS("failed to attach phys object\n"); 13494 goto out_unlock; 13495 } 13496 } else { 13497 struct i915_vma *vma; 13498 13499 vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13500 if (IS_ERR(vma)) { 13501 DRM_DEBUG_KMS("failed to pin object\n"); 13502 13503 ret = PTR_ERR(vma); 13504 goto out_unlock; 13505 } 13506 13507 to_intel_plane_state(new_plane_state)->vma = vma; 13508 } 13509 13510 old_fb = old_plane_state->fb; 13511 old_vma = to_intel_plane_state(old_plane_state)->vma; 13512 13513 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), 13514 intel_plane->frontbuffer_bit); 13515 13516 /* Swap plane state */ 13517 new_plane_state->fence = old_plane_state->fence; 13518 *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state); 13519 new_plane_state->fence = NULL; 13520 new_plane_state->fb = old_fb; 13521 to_intel_plane_state(new_plane_state)->vma = old_vma; 13522 13523 if (plane->state->visible) { 13524 trace_intel_update_plane(plane, to_intel_crtc(crtc)); 13525 intel_plane->update_plane(plane, 13526 to_intel_crtc_state(crtc->state), 13527 to_intel_plane_state(plane->state)); 13528 } else { 13529 trace_intel_disable_plane(plane, to_intel_crtc(crtc)); 13530 intel_plane->disable_plane(plane, crtc); 13531 } 13532 13533 intel_cleanup_plane_fb(plane, new_plane_state); 13534 13535 out_unlock: 13536 mutex_unlock(&dev_priv->drm.struct_mutex); 13537 out_free: 13538 intel_plane_destroy_state(plane, new_plane_state); 13539 return ret; 13540 13541 slow: 13542 return drm_atomic_helper_update_plane(plane, crtc, fb, 13543 crtc_x, crtc_y, crtc_w, crtc_h, 13544 src_x, src_y, src_w, src_h, ctx); 13545 } 13546 13547 static const struct drm_plane_funcs intel_cursor_plane_funcs = { 13548 .update_plane = intel_legacy_cursor_update, 13549 .disable_plane = drm_atomic_helper_disable_plane, 13550 .destroy = intel_plane_destroy, 13551 .set_property = drm_atomic_helper_plane_set_property, 13552 .atomic_get_property = intel_plane_atomic_get_property, 13553 .atomic_set_property = intel_plane_atomic_set_property, 13554 .atomic_duplicate_state = intel_plane_duplicate_state, 13555 .atomic_destroy_state = intel_plane_destroy_state, 13556 }; 13557 13558 static struct intel_plane * 13559 intel_primary_plane_create(struct 
drm_i915_private *dev_priv, enum i915_pipe pipe) 13560 { 13561 struct intel_plane *primary = NULL; 13562 struct intel_plane_state *state = NULL; 13563 const uint32_t *intel_primary_formats; 13564 unsigned int supported_rotations; 13565 unsigned int num_formats; 13566 int ret; 13567 13568 primary = kzalloc(sizeof(*primary), GFP_KERNEL); 13569 if (!primary) { 13570 ret = -ENOMEM; 13571 goto fail; 13572 } 13573 13574 state = intel_create_plane_state(&primary->base); 13575 if (!state) { 13576 ret = -ENOMEM; 13577 goto fail; 13578 } 13579 13580 primary->base.state = &state->base; 13581 13582 primary->can_scale = false; 13583 primary->max_downscale = 1; 13584 if (INTEL_GEN(dev_priv) >= 9) { 13585 primary->can_scale = true; 13586 state->scaler_id = -1; 13587 } 13588 primary->pipe = pipe; 13589 /* 13590 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS 13591 * port is hooked to pipe B. Hence we want plane A feeding pipe B. 13592 */ 13593 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4) 13594 primary->plane = (enum plane) !pipe; 13595 else 13596 primary->plane = (enum plane) pipe; 13597 primary->id = PLANE_PRIMARY; 13598 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe); 13599 primary->check_plane = intel_check_primary_plane; 13600 13601 if (INTEL_GEN(dev_priv) >= 9) { 13602 intel_primary_formats = skl_primary_formats; 13603 num_formats = ARRAY_SIZE(skl_primary_formats); 13604 13605 primary->update_plane = skylake_update_primary_plane; 13606 primary->disable_plane = skylake_disable_primary_plane; 13607 } else if (INTEL_GEN(dev_priv) >= 4) { 13608 intel_primary_formats = i965_primary_formats; 13609 num_formats = ARRAY_SIZE(i965_primary_formats); 13610 13611 primary->update_plane = i9xx_update_primary_plane; 13612 primary->disable_plane = i9xx_disable_primary_plane; 13613 } else { 13614 intel_primary_formats = i8xx_primary_formats; 13615 num_formats = ARRAY_SIZE(i8xx_primary_formats); 13616 13617 primary->update_plane = i9xx_update_primary_plane; 13618 primary->disable_plane = i9xx_disable_primary_plane; 13619 } 13620 13621 if (INTEL_GEN(dev_priv) >= 9) 13622 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13623 0, &intel_plane_funcs, 13624 intel_primary_formats, num_formats, 13625 DRM_PLANE_TYPE_PRIMARY, 13626 "plane 1%c", pipe_name(pipe)); 13627 else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 13628 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13629 0, &intel_plane_funcs, 13630 intel_primary_formats, num_formats, 13631 DRM_PLANE_TYPE_PRIMARY, 13632 "primary %c", pipe_name(pipe)); 13633 else 13634 ret = drm_universal_plane_init(&dev_priv->drm, &primary->base, 13635 0, &intel_plane_funcs, 13636 intel_primary_formats, num_formats, 13637 DRM_PLANE_TYPE_PRIMARY, 13638 "plane %c", plane_name(primary->plane)); 13639 if (ret) 13640 goto fail; 13641 13642 if (INTEL_GEN(dev_priv) >= 9) { 13643 supported_rotations = 13644 DRM_ROTATE_0 | DRM_ROTATE_90 | 13645 DRM_ROTATE_180 | DRM_ROTATE_270; 13646 } else if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { 13647 supported_rotations = 13648 DRM_ROTATE_0 | DRM_ROTATE_180 | 13649 DRM_REFLECT_X; 13650 } else if (INTEL_GEN(dev_priv) >= 4) { 13651 supported_rotations = 13652 DRM_ROTATE_0 | DRM_ROTATE_180; 13653 } else { 13654 supported_rotations = DRM_ROTATE_0; 13655 } 13656 13657 if (INTEL_GEN(dev_priv) >= 4) 13658 drm_plane_create_rotation_property(&primary->base, 13659 DRM_ROTATE_0, 13660 supported_rotations); 13661 13662 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs); 13663 13664 
return primary; 13665 13666 fail: 13667 kfree(state); 13668 kfree(primary); 13669 13670 return ERR_PTR(ret); 13671 } 13672 13673 static int 13674 intel_check_cursor_plane(struct drm_plane *plane, 13675 struct intel_crtc_state *crtc_state, 13676 struct intel_plane_state *state) 13677 { 13678 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13679 struct drm_framebuffer *fb = state->base.fb; 13680 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13681 enum i915_pipe pipe = to_intel_plane(plane)->pipe; 13682 unsigned stride; 13683 int ret; 13684 13685 ret = drm_plane_helper_check_state(&state->base, 13686 &state->clip, 13687 DRM_PLANE_HELPER_NO_SCALING, 13688 DRM_PLANE_HELPER_NO_SCALING, 13689 true, true); 13690 if (ret) 13691 return ret; 13692 13693 /* if we want to turn off the cursor ignore width and height */ 13694 if (!obj) 13695 return 0; 13696 13697 /* Check for which cursor types we support */ 13698 if (!cursor_size_ok(dev_priv, state->base.crtc_w, 13699 state->base.crtc_h)) { 13700 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 13701 state->base.crtc_w, state->base.crtc_h); 13702 return -EINVAL; 13703 } 13704 13705 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 13706 if (obj->base.size < stride * state->base.crtc_h) { 13707 DRM_DEBUG_KMS("buffer is too small\n"); 13708 return -ENOMEM; 13709 } 13710 13711 if (fb->modifier != DRM_FORMAT_MOD_LINEAR) { 13712 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 13713 return -EINVAL; 13714 } 13715 13716 /* 13717 * There's something wrong with the cursor on CHV pipe C. 13718 * If it straddles the left edge of the screen then 13719 * moving it away from the edge or disabling it often 13720 * results in a pipe underrun, and often that can lead to 13721 * dead pipe (constant underrun reported, and it scans 13722 * out just a solid color). To recover from that, the 13723 * display power well must be turned off and on again. 13724 * Refuse to put the cursor into that compromised position.
13725 */ 13726 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && 13727 state->base.visible && state->base.crtc_x < 0) { 13728 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 13729 return -EINVAL; 13730 } 13731 13732 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) 13733 state->ctl = i845_cursor_ctl(crtc_state, state); 13734 else 13735 state->ctl = i9xx_cursor_ctl(crtc_state, state); 13736 13737 return 0; 13738 } 13739 13740 static void 13741 intel_disable_cursor_plane(struct drm_plane *plane, 13742 struct drm_crtc *crtc) 13743 { 13744 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13745 13746 intel_crtc->cursor_addr = 0; 13747 intel_crtc_update_cursor(crtc, NULL); 13748 } 13749 13750 static void 13751 intel_update_cursor_plane(struct drm_plane *plane, 13752 const struct intel_crtc_state *crtc_state, 13753 const struct intel_plane_state *state) 13754 { 13755 struct drm_crtc *crtc = crtc_state->base.crtc; 13756 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 13757 struct drm_i915_private *dev_priv = to_i915(plane->dev); 13758 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); 13759 uint32_t addr; 13760 13761 if (!obj) 13762 addr = 0; 13763 else if (!INTEL_INFO(dev_priv)->cursor_needs_physical) 13764 addr = intel_plane_ggtt_offset(state); 13765 else 13766 addr = obj->phys_handle->busaddr; 13767 13768 intel_crtc->cursor_addr = addr; 13769 intel_crtc_update_cursor(crtc, state); 13770 } 13771 13772 static struct intel_plane * 13773 intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 13774 { 13775 struct intel_plane *cursor = NULL; 13776 struct intel_plane_state *state = NULL; 13777 int ret; 13778 13779 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 13780 if (!cursor) { 13781 ret = -ENOMEM; 13782 goto fail; 13783 } 13784 13785 state = intel_create_plane_state(&cursor->base); 13786 if (!state) { 13787 ret = -ENOMEM; 13788 goto fail; 13789 } 13790 13791 cursor->base.state = &state->base; 13792 13793 cursor->can_scale = false; 13794 cursor->max_downscale = 1; 13795 cursor->pipe = pipe; 13796 cursor->plane = pipe; 13797 cursor->id = PLANE_CURSOR; 13798 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 13799 cursor->check_plane = intel_check_cursor_plane; 13800 cursor->update_plane = intel_update_cursor_plane; 13801 cursor->disable_plane = intel_disable_cursor_plane; 13802 13803 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, 13804 0, &intel_cursor_plane_funcs, 13805 intel_cursor_formats, 13806 ARRAY_SIZE(intel_cursor_formats), 13807 DRM_PLANE_TYPE_CURSOR, 13808 "cursor %c", pipe_name(pipe)); 13809 if (ret) 13810 goto fail; 13811 13812 if (INTEL_GEN(dev_priv) >= 4) 13813 drm_plane_create_rotation_property(&cursor->base, 13814 DRM_ROTATE_0, 13815 DRM_ROTATE_0 | 13816 DRM_ROTATE_180); 13817 13818 if (INTEL_GEN(dev_priv) >= 9) 13819 state->scaler_id = -1; 13820 13821 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 13822 13823 return cursor; 13824 13825 fail: 13826 kfree(state); 13827 kfree(cursor); 13828 13829 return ERR_PTR(ret); 13830 } 13831 13832 static void intel_crtc_init_scalers(struct intel_crtc *crtc, 13833 struct intel_crtc_state *crtc_state) 13834 { 13835 struct intel_crtc_scaler_state *scaler_state = 13836 &crtc_state->scaler_state; 13837 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 13838 int i; 13839 13840 crtc->num_scalers = dev_priv->info.num_scalers[crtc->pipe]; 13841 if (!crtc->num_scalers) 13842 return; 13843 13844 for (i = 0; i < crtc->num_scalers; i++) 
{ 13845 struct intel_scaler *scaler = &scaler_state->scalers[i]; 13846 13847 scaler->in_use = 0; 13848 scaler->mode = PS_SCALER_MODE_DYN; 13849 } 13850 13851 scaler_state->scaler_id = -1; 13852 } 13853 13854 static int intel_crtc_init(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 13855 { 13856 struct intel_crtc *intel_crtc; 13857 struct intel_crtc_state *crtc_state = NULL; 13858 struct intel_plane *primary = NULL; 13859 struct intel_plane *cursor = NULL; 13860 int sprite, ret; 13861 13862 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL); 13863 if (!intel_crtc) 13864 return -ENOMEM; 13865 13866 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); 13867 if (!crtc_state) { 13868 ret = -ENOMEM; 13869 goto fail; 13870 } 13871 intel_crtc->config = crtc_state; 13872 intel_crtc->base.state = &crtc_state->base; 13873 crtc_state->base.crtc = &intel_crtc->base; 13874 13875 primary = intel_primary_plane_create(dev_priv, pipe); 13876 if (IS_ERR(primary)) { 13877 ret = PTR_ERR(primary); 13878 goto fail; 13879 } 13880 intel_crtc->plane_ids_mask |= BIT(primary->id); 13881 13882 for_each_sprite(dev_priv, pipe, sprite) { 13883 struct intel_plane *plane; 13884 13885 plane = intel_sprite_plane_create(dev_priv, pipe, sprite); 13886 if (IS_ERR(plane)) { 13887 ret = PTR_ERR(plane); 13888 goto fail; 13889 } 13890 intel_crtc->plane_ids_mask |= BIT(plane->id); 13891 } 13892 13893 cursor = intel_cursor_plane_create(dev_priv, pipe); 13894 if (IS_ERR(cursor)) { 13895 ret = PTR_ERR(cursor); 13896 goto fail; 13897 } 13898 intel_crtc->plane_ids_mask |= BIT(cursor->id); 13899 13900 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base, 13901 &primary->base, &cursor->base, 13902 &intel_crtc_funcs, 13903 "pipe %c", pipe_name(pipe)); 13904 if (ret) 13905 goto fail; 13906 13907 intel_crtc->pipe = pipe; 13908 intel_crtc->plane = primary->plane; 13909 13910 intel_crtc->cursor_base = ~0; 13911 intel_crtc->cursor_cntl = ~0; 13912 intel_crtc->cursor_size = ~0; 13913 13914 /* initialize shared scalers */ 13915 intel_crtc_init_scalers(intel_crtc, crtc_state); 13916 13917 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 13918 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 13919 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = intel_crtc; 13920 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = intel_crtc; 13921 13922 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 13923 13924 intel_color_init(&intel_crtc->base); 13925 13926 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 13927 13928 return 0; 13929 13930 fail: 13931 /* 13932 * drm_mode_config_cleanup() will free up any 13933 * crtcs/planes already initialized. 
13934 */ 13935 kfree(crtc_state); 13936 kfree(intel_crtc); 13937 13938 return ret; 13939 } 13940 13941 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 13942 { 13943 struct drm_device *dev = connector->base.dev; 13944 13945 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 13946 13947 if (!connector->base.state->crtc) 13948 return INVALID_PIPE; 13949 13950 return to_intel_crtc(connector->base.state->crtc)->pipe; 13951 } 13952 13953 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13954 struct drm_file *file) 13955 { 13956 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13957 struct drm_crtc *drmmode_crtc; 13958 struct intel_crtc *crtc; 13959 13960 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 13961 if (!drmmode_crtc) 13962 return -ENOENT; 13963 13964 crtc = to_intel_crtc(drmmode_crtc); 13965 pipe_from_crtc_id->pipe = crtc->pipe; 13966 13967 return 0; 13968 } 13969 13970 static int intel_encoder_clones(struct intel_encoder *encoder) 13971 { 13972 struct drm_device *dev = encoder->base.dev; 13973 struct intel_encoder *source_encoder; 13974 int index_mask = 0; 13975 int entry = 0; 13976 13977 for_each_intel_encoder(dev, source_encoder) { 13978 if (encoders_cloneable(encoder, source_encoder)) 13979 index_mask |= (1 << entry); 13980 13981 entry++; 13982 } 13983 13984 return index_mask; 13985 } 13986 13987 static bool has_edp_a(struct drm_i915_private *dev_priv) 13988 { 13989 if (!IS_MOBILE(dev_priv)) 13990 return false; 13991 13992 if ((I915_READ(DP_A) & DP_DETECTED) == 0) 13993 return false; 13994 13995 if (IS_GEN5(dev_priv) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE)) 13996 return false; 13997 13998 return true; 13999 } 14000 14001 static bool intel_crt_present(struct drm_i915_private *dev_priv) 14002 { 14003 if (INTEL_GEN(dev_priv) >= 9) 14004 return false; 14005 14006 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 14007 return false; 14008 14009 if (IS_CHERRYVIEW(dev_priv)) 14010 return false; 14011 14012 if (HAS_PCH_LPT_H(dev_priv) && 14013 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED) 14014 return false; 14015 14016 /* DDI E can't be used if DDI A requires 4 lanes */ 14017 if (HAS_DDI(dev_priv) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES) 14018 return false; 14019 14020 if (!dev_priv->vbt.int_crt_support) 14021 return false; 14022 14023 return true; 14024 } 14025 14026 void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv) 14027 { 14028 int pps_num; 14029 int pps_idx; 14030 14031 if (HAS_DDI(dev_priv)) 14032 return; 14033 /* 14034 * This w/a is needed at least on CPT/PPT, but to be sure apply it 14035 * everywhere where registers can be write protected. 
14036 */ 14037 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 14038 pps_num = 2; 14039 else 14040 pps_num = 1; 14041 14042 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) { 14043 u32 val = I915_READ(PP_CONTROL(pps_idx)); 14044 14045 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS; 14046 I915_WRITE(PP_CONTROL(pps_idx), val); 14047 } 14048 } 14049 14050 static void intel_pps_init(struct drm_i915_private *dev_priv) 14051 { 14052 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv)) 14053 dev_priv->pps_mmio_base = PCH_PPS_BASE; 14054 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 14055 dev_priv->pps_mmio_base = VLV_PPS_BASE; 14056 else 14057 dev_priv->pps_mmio_base = PPS_BASE; 14058 14059 intel_pps_unlock_regs_wa(dev_priv); 14060 } 14061 14062 static void intel_setup_outputs(struct drm_i915_private *dev_priv) 14063 { 14064 struct intel_encoder *encoder; 14065 bool dpd_is_edp = false; 14066 14067 intel_pps_init(dev_priv); 14068 14069 /* 14070 * intel_edp_init_connector() depends on this completing first, to 14071 * prevent the registration of both eDP and LVDS and the incorrect 14072 * sharing of the PPS. 14073 */ 14074 intel_lvds_init(dev_priv); 14075 14076 if (intel_crt_present(dev_priv)) 14077 intel_crt_init(dev_priv); 14078 14079 if (IS_GEN9_LP(dev_priv)) { 14080 /* 14081 * FIXME: Broxton doesn't support port detection via the 14082 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to 14083 * detect the ports. 14084 */ 14085 intel_ddi_init(dev_priv, PORT_A); 14086 intel_ddi_init(dev_priv, PORT_B); 14087 intel_ddi_init(dev_priv, PORT_C); 14088 14089 intel_dsi_init(dev_priv); 14090 } else if (HAS_DDI(dev_priv)) { 14091 int found; 14092 14093 /* 14094 * Haswell uses DDI functions to detect digital outputs. 14095 * On SKL pre-D0 the strap isn't connected, so we assume 14096 * it's there. 14097 */ 14098 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14099 /* WaIgnoreDDIAStrap: skl */ 14100 if (found || IS_GEN9_BC(dev_priv)) 14101 intel_ddi_init(dev_priv, PORT_A); 14102 14103 /* DDI B, C and D detection is indicated by the SFUSE_STRAP 14104 * register */ 14105 found = I915_READ(SFUSE_STRAP); 14106 14107 if (found & SFUSE_STRAP_DDIB_DETECTED) 14108 intel_ddi_init(dev_priv, PORT_B); 14109 if (found & SFUSE_STRAP_DDIC_DETECTED) 14110 intel_ddi_init(dev_priv, PORT_C); 14111 if (found & SFUSE_STRAP_DDID_DETECTED) 14112 intel_ddi_init(dev_priv, PORT_D); 14113 /* 14114 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
14115 */ 14116 if (IS_GEN9_BC(dev_priv) && 14117 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp || 14118 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi || 14119 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi)) 14120 intel_ddi_init(dev_priv, PORT_E); 14121 14122 } else if (HAS_PCH_SPLIT(dev_priv)) { 14123 int found; 14124 dpd_is_edp = intel_dp_is_edp(dev_priv, PORT_D); 14125 14126 if (has_edp_a(dev_priv)) 14127 intel_dp_init(dev_priv, DP_A, PORT_A); 14128 14129 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) { 14130 /* PCH SDVOB multiplex with HDMIB */ 14131 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B); 14132 if (!found) 14133 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B); 14134 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) 14135 intel_dp_init(dev_priv, PCH_DP_B, PORT_B); 14136 } 14137 14138 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED) 14139 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C); 14140 14141 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED) 14142 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D); 14143 14144 if (I915_READ(PCH_DP_C) & DP_DETECTED) 14145 intel_dp_init(dev_priv, PCH_DP_C, PORT_C); 14146 14147 if (I915_READ(PCH_DP_D) & DP_DETECTED) 14148 intel_dp_init(dev_priv, PCH_DP_D, PORT_D); 14149 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 14150 bool has_edp, has_port; 14151 14152 /* 14153 * The DP_DETECTED bit is the latched state of the DDC 14154 * SDA pin at boot. However since eDP doesn't require DDC 14155 * (no way to plug in a DP->HDMI dongle) the DDC pins for 14156 * eDP ports may have been muxed to an alternate function. 14157 * Thus we can't rely on the DP_DETECTED bit alone to detect 14158 * eDP ports. Consult the VBT as well as DP_DETECTED to 14159 * detect eDP ports. 14160 * 14161 * Sadly the straps seem to be missing sometimes even for HDMI 14162 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap 14163 * and VBT for the presence of the port. Additionally we can't 14164 * trust the port type the VBT declares as we've seen at least 14165 * HDMI ports that the VBT claim are DP or eDP. 
14166 */ 14167 has_edp = intel_dp_is_edp(dev_priv, PORT_B); 14168 has_port = intel_bios_is_port_present(dev_priv, PORT_B); 14169 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port) 14170 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B); 14171 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp) 14172 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B); 14173 14174 has_edp = intel_dp_is_edp(dev_priv, PORT_C); 14175 has_port = intel_bios_is_port_present(dev_priv, PORT_C); 14176 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port) 14177 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C); 14178 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp) 14179 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C); 14180 14181 if (IS_CHERRYVIEW(dev_priv)) { 14182 /* 14183 * eDP not supported on port D, 14184 * so no need to worry about it 14185 */ 14186 has_port = intel_bios_is_port_present(dev_priv, PORT_D); 14187 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port) 14188 intel_dp_init(dev_priv, CHV_DP_D, PORT_D); 14189 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port) 14190 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D); 14191 } 14192 14193 intel_dsi_init(dev_priv); 14194 } else if (!IS_GEN2(dev_priv) && !IS_PINEVIEW(dev_priv)) { 14195 bool found = false; 14196 14197 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14198 DRM_DEBUG_KMS("probing SDVOB\n"); 14199 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B); 14200 if (!found && IS_G4X(dev_priv)) { 14201 DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 14202 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B); 14203 } 14204 14205 if (!found && IS_G4X(dev_priv)) 14206 intel_dp_init(dev_priv, DP_B, PORT_B); 14207 } 14208 14209 /* Before G4X SDVOC doesn't have its own detect register */ 14210 14211 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) { 14212 DRM_DEBUG_KMS("probing SDVOC\n"); 14213 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C); 14214 } 14215 14216 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) { 14217 14218 if (IS_G4X(dev_priv)) { 14219 DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 14220 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C); 14221 } 14222 if (IS_G4X(dev_priv)) 14223 intel_dp_init(dev_priv, DP_C, PORT_C); 14224 } 14225 14226 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED)) 14227 intel_dp_init(dev_priv, DP_D, PORT_D); 14228 } else if (IS_GEN2(dev_priv)) 14229 intel_dvo_init(dev_priv); 14230 14231 if (SUPPORTS_TV(dev_priv)) 14232 intel_tv_init(dev_priv); 14233 14234 intel_psr_init(dev_priv); 14235 14236 for_each_intel_encoder(&dev_priv->drm, encoder) { 14237 encoder->base.possible_crtcs = encoder->crtc_mask; 14238 encoder->base.possible_clones = 14239 intel_encoder_clones(encoder); 14240 } 14241 14242 intel_init_pch_refclk(dev_priv); 14243 14244 drm_helper_move_panel_connectors_to_head(&dev_priv->drm); 14245 } 14246 14247 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) 14248 { 14249 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14250 14251 drm_framebuffer_cleanup(fb); 14252 14253 i915_gem_object_lock(intel_fb->obj); 14254 WARN_ON(!intel_fb->obj->framebuffer_references--); 14255 i915_gem_object_unlock(intel_fb->obj); 14256 14257 i915_gem_object_put(intel_fb->obj); 14258 14259 kfree(intel_fb); 14260 } 14261 14262 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, 14263 struct drm_file *file, 14264 unsigned int *handle) 14265 { 14266 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 14267 struct drm_i915_gem_object *obj = intel_fb->obj; 14268 
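	/*
	 * userptr-backed framebuffers are not allowed to be exposed via a
	 * GEM handle; refuse the request.
	 */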
14269 if (obj->userptr.mm) { 14270 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n"); 14271 return -EINVAL; 14272 } 14273 14274 return drm_gem_handle_create(file, &obj->base, handle); 14275 } 14276 14277 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb, 14278 struct drm_file *file, 14279 unsigned flags, unsigned color, 14280 struct drm_clip_rect *clips, 14281 unsigned num_clips) 14282 { 14283 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14284 14285 i915_gem_object_flush_if_display(obj); 14286 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB); 14287 14288 return 0; 14289 } 14290 14291 static const struct drm_framebuffer_funcs intel_fb_funcs = { 14292 .destroy = intel_user_framebuffer_destroy, 14293 .create_handle = intel_user_framebuffer_create_handle, 14294 .dirty = intel_user_framebuffer_dirty, 14295 }; 14296 14297 static 14298 u32 intel_fb_pitch_limit(struct drm_i915_private *dev_priv, 14299 uint64_t fb_modifier, uint32_t pixel_format) 14300 { 14301 u32 gen = INTEL_GEN(dev_priv); 14302 14303 if (gen >= 9) { 14304 int cpp = drm_format_plane_cpp(pixel_format, 0); 14305 14306 /* "The stride in bytes must not exceed the size of 8K 14307 * pixels and 32K bytes." 14308 */ 14309 return min(8192 * cpp, 32768); 14310 } else if (gen >= 5 && !HAS_GMCH_DISPLAY(dev_priv)) { 14311 return 32*1024; 14312 } else if (gen >= 4) { 14313 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14314 return 16*1024; 14315 else 14316 return 32*1024; 14317 } else if (gen >= 3) { 14318 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 14319 return 8*1024; 14320 else 14321 return 16*1024; 14322 } else { 14323 /* XXX DSPC is limited to 4k tiled */ 14324 return 8*1024; 14325 } 14326 } 14327 14328 static int intel_framebuffer_init(struct intel_framebuffer *intel_fb, 14329 struct drm_i915_gem_object *obj, 14330 struct drm_mode_fb_cmd2 *mode_cmd) 14331 { 14332 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 14333 struct drm_format_name_buf format_name; 14334 u32 pitch_limit, stride_alignment; 14335 unsigned int tiling, stride; 14336 int ret = -EINVAL; 14337 14338 i915_gem_object_lock(obj); 14339 obj->framebuffer_references++; 14340 tiling = i915_gem_object_get_tiling(obj); 14341 stride = i915_gem_object_get_stride(obj); 14342 i915_gem_object_unlock(obj); 14343 14344 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 14345 /* 14346 * If there's a fence, enforce that 14347 * the fb modifier and tiling mode match. 14348 */ 14349 if (tiling != I915_TILING_NONE && 14350 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 14351 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n"); 14352 goto err; 14353 } 14354 } else { 14355 if (tiling == I915_TILING_X) { 14356 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 14357 } else if (tiling == I915_TILING_Y) { 14358 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n"); 14359 goto err; 14360 } 14361 } 14362 14363 /* Passed in modifier sanity checking. */ 14364 switch (mode_cmd->modifier[0]) { 14365 case I915_FORMAT_MOD_Y_TILED: 14366 case I915_FORMAT_MOD_Yf_TILED: 14367 if (INTEL_GEN(dev_priv) < 9) { 14368 DRM_DEBUG_KMS("Unsupported tiling 0x%llx!\n", 14369 mode_cmd->modifier[0]); 14370 goto err; 14371 } 14372 case DRM_FORMAT_MOD_LINEAR: 14373 case I915_FORMAT_MOD_X_TILED: 14374 break; 14375 default: 14376 DRM_DEBUG_KMS("Unsupported fb modifier 0x%llx!\n", 14377 mode_cmd->modifier[0]); 14378 goto err; 14379 } 14380 14381 /* 14382 * gen2/3 display engine uses the fence if present, 14383 * so the tiling mode must match the fb modifier exactly.
14384 */ 14385 if (INTEL_INFO(dev_priv)->gen < 4 && 14386 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 14387 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 14388 goto err; 14389 } 14390 14391 pitch_limit = intel_fb_pitch_limit(dev_priv, mode_cmd->modifier[0], 14392 mode_cmd->pixel_format); 14393 if (mode_cmd->pitches[0] > pitch_limit) { 14394 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n", 14395 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ? 14396 "tiled" : "linear", 14397 mode_cmd->pitches[0], pitch_limit); 14398 goto err; 14399 } 14400 14401 /* 14402 * If there's a fence, enforce that 14403 * the fb pitch and fence stride match. 14404 */ 14405 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) { 14406 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n", 14407 mode_cmd->pitches[0], stride); 14408 goto err; 14409 } 14410 14411 /* Reject formats not supported by any plane early. */ 14412 switch (mode_cmd->pixel_format) { 14413 case DRM_FORMAT_C8: 14414 case DRM_FORMAT_RGB565: 14415 case DRM_FORMAT_XRGB8888: 14416 case DRM_FORMAT_ARGB8888: 14417 break; 14418 case DRM_FORMAT_XRGB1555: 14419 if (INTEL_GEN(dev_priv) > 3) { 14420 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14421 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14422 goto err; 14423 } 14424 break; 14425 case DRM_FORMAT_ABGR8888: 14426 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) && 14427 INTEL_GEN(dev_priv) < 9) { 14428 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14429 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14430 goto err; 14431 } 14432 break; 14433 case DRM_FORMAT_XBGR8888: 14434 case DRM_FORMAT_XRGB2101010: 14435 case DRM_FORMAT_XBGR2101010: 14436 if (INTEL_GEN(dev_priv) < 4) { 14437 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14438 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14439 goto err; 14440 } 14441 break; 14442 case DRM_FORMAT_ABGR2101010: 14443 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { 14444 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14445 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14446 goto err; 14447 } 14448 break; 14449 case DRM_FORMAT_YUYV: 14450 case DRM_FORMAT_UYVY: 14451 case DRM_FORMAT_YVYU: 14452 case DRM_FORMAT_VYUY: 14453 if (INTEL_GEN(dev_priv) < 5) { 14454 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14455 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14456 goto err; 14457 } 14458 break; 14459 default: 14460 DRM_DEBUG_KMS("unsupported pixel format: %s\n", 14461 drm_get_format_name(mode_cmd->pixel_format, &format_name)); 14462 goto err; 14463 } 14464 14465 /* FIXME need to adjust LINOFF/TILEOFF accordingly. 
*/ 14466 if (mode_cmd->offsets[0] != 0) 14467 goto err; 14468 14469 drm_helper_mode_fill_fb_struct(&dev_priv->drm, 14470 &intel_fb->base, mode_cmd); 14471 14472 stride_alignment = intel_fb_stride_alignment(&intel_fb->base, 0); 14473 if (mode_cmd->pitches[0] & (stride_alignment - 1)) { 14474 DRM_DEBUG_KMS("pitch (%d) must be at least %u byte aligned\n", 14475 mode_cmd->pitches[0], stride_alignment); 14476 goto err; 14477 } 14478 14479 intel_fb->obj = obj; 14480 14481 ret = intel_fill_fb_info(dev_priv, &intel_fb->base); 14482 if (ret) 14483 goto err; 14484 14485 ret = drm_framebuffer_init(obj->base.dev, 14486 &intel_fb->base, 14487 &intel_fb_funcs); 14488 if (ret) { 14489 DRM_ERROR("framebuffer init failed %d\n", ret); 14490 goto err; 14491 } 14492 14493 return 0; 14494 14495 err: 14496 i915_gem_object_lock(obj); 14497 obj->framebuffer_references--; 14498 i915_gem_object_unlock(obj); 14499 return ret; 14500 } 14501 14502 static struct drm_framebuffer * 14503 intel_user_framebuffer_create(struct drm_device *dev, 14504 struct drm_file *filp, 14505 const struct drm_mode_fb_cmd2 *user_mode_cmd) 14506 { 14507 struct drm_framebuffer *fb; 14508 struct drm_i915_gem_object *obj; 14509 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd; 14510 14511 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]); 14512 if (!obj) 14513 return ERR_PTR(-ENOENT); 14514 14515 fb = intel_framebuffer_create(obj, &mode_cmd); 14516 if (IS_ERR(fb)) 14517 i915_gem_object_put(obj); 14518 14519 return fb; 14520 } 14521 14522 static void intel_atomic_state_free(struct drm_atomic_state *state) 14523 { 14524 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 14525 14526 drm_atomic_state_default_release(state); 14527 14528 i915_sw_fence_fini(&intel_state->commit_ready); 14529 14530 kfree(state); 14531 } 14532 14533 static const struct drm_mode_config_funcs intel_mode_funcs = { 14534 .fb_create = intel_user_framebuffer_create, 14535 .output_poll_changed = intel_fbdev_output_poll_changed, 14536 .atomic_check = intel_atomic_check, 14537 .atomic_commit = intel_atomic_commit, 14538 .atomic_state_alloc = intel_atomic_state_alloc, 14539 .atomic_state_clear = intel_atomic_state_clear, 14540 .atomic_state_free = intel_atomic_state_free, 14541 }; 14542 14543 /** 14544 * intel_init_display_hooks - initialize the display modesetting hooks 14545 * @dev_priv: device private 14546 */ 14547 void intel_init_display_hooks(struct drm_i915_private *dev_priv) 14548 { 14549 intel_init_cdclk_hooks(dev_priv); 14550 14551 if (INTEL_INFO(dev_priv)->gen >= 9) { 14552 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14553 dev_priv->display.get_initial_plane_config = 14554 skylake_get_initial_plane_config; 14555 dev_priv->display.crtc_compute_clock = 14556 haswell_crtc_compute_clock; 14557 dev_priv->display.crtc_enable = haswell_crtc_enable; 14558 dev_priv->display.crtc_disable = haswell_crtc_disable; 14559 } else if (HAS_DDI(dev_priv)) { 14560 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14561 dev_priv->display.get_initial_plane_config = 14562 ironlake_get_initial_plane_config; 14563 dev_priv->display.crtc_compute_clock = 14564 haswell_crtc_compute_clock; 14565 dev_priv->display.crtc_enable = haswell_crtc_enable; 14566 dev_priv->display.crtc_disable = haswell_crtc_disable; 14567 } else if (HAS_PCH_SPLIT(dev_priv)) { 14568 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14569 dev_priv->display.get_initial_plane_config = 14570 ironlake_get_initial_plane_config; 14571 
dev_priv->display.crtc_compute_clock = 14572 ironlake_crtc_compute_clock; 14573 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14574 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14575 } else if (IS_CHERRYVIEW(dev_priv)) { 14576 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14577 dev_priv->display.get_initial_plane_config = 14578 i9xx_get_initial_plane_config; 14579 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 14580 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14581 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14582 } else if (IS_VALLEYVIEW(dev_priv)) { 14583 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14584 dev_priv->display.get_initial_plane_config = 14585 i9xx_get_initial_plane_config; 14586 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 14587 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14588 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14589 } else if (IS_G4X(dev_priv)) { 14590 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14591 dev_priv->display.get_initial_plane_config = 14592 i9xx_get_initial_plane_config; 14593 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 14594 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14595 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14596 } else if (IS_PINEVIEW(dev_priv)) { 14597 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14598 dev_priv->display.get_initial_plane_config = 14599 i9xx_get_initial_plane_config; 14600 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 14601 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14602 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14603 } else if (!IS_GEN2(dev_priv)) { 14604 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14605 dev_priv->display.get_initial_plane_config = 14606 i9xx_get_initial_plane_config; 14607 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14608 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14609 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14610 } else { 14611 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14612 dev_priv->display.get_initial_plane_config = 14613 i9xx_get_initial_plane_config; 14614 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 14615 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14616 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14617 } 14618 14619 if (IS_GEN5(dev_priv)) { 14620 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14621 } else if (IS_GEN6(dev_priv)) { 14622 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14623 } else if (IS_IVYBRIDGE(dev_priv)) { 14624 /* FIXME: detect B0+ stepping and use auto training */ 14625 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14626 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 14627 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14628 } 14629 14630 if (dev_priv->info.gen >= 9) 14631 dev_priv->display.update_crtcs = skl_update_crtcs; 14632 else 14633 dev_priv->display.update_crtcs = intel_update_crtcs; 14634 14635 switch (INTEL_INFO(dev_priv)->gen) { 14636 case 2: 14637 dev_priv->display.queue_flip = intel_gen2_queue_flip; 14638 break; 14639 14640 case 3: 14641 dev_priv->display.queue_flip = intel_gen3_queue_flip; 14642 break; 14643 14644 case 4: 14645 case 5: 14646 dev_priv->display.queue_flip = intel_gen4_queue_flip; 14647 break; 14648 14649 case 6: 14650 dev_priv->display.queue_flip = intel_gen6_queue_flip; 14651 
break; 14652 case 7: 14653 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 14654 dev_priv->display.queue_flip = intel_gen7_queue_flip; 14655 break; 14656 case 9: 14657 /* Drop through - unsupported since execlist only. */ 14658 default: 14659 /* Default just returns -ENODEV to indicate unsupported */ 14660 dev_priv->display.queue_flip = intel_default_queue_flip; 14661 } 14662 } 14663 14664 /* 14665 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, 14666 * resume, or other times. This quirk makes sure that's the case for 14667 * affected systems. 14668 */ 14669 static void quirk_pipea_force(struct drm_device *dev) 14670 { 14671 struct drm_i915_private *dev_priv = to_i915(dev); 14672 14673 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 14674 DRM_INFO("applying pipe a force quirk\n"); 14675 } 14676 14677 static void quirk_pipeb_force(struct drm_device *dev) 14678 { 14679 struct drm_i915_private *dev_priv = to_i915(dev); 14680 14681 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 14682 DRM_INFO("applying pipe b force quirk\n"); 14683 } 14684 14685 /* 14686 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 14687 */ 14688 static void quirk_ssc_force_disable(struct drm_device *dev) 14689 { 14690 struct drm_i915_private *dev_priv = to_i915(dev); 14691 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 14692 DRM_INFO("applying lvds SSC disable quirk\n"); 14693 } 14694 14695 /* 14696 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 14697 * brightness value 14698 */ 14699 static void quirk_invert_brightness(struct drm_device *dev) 14700 { 14701 struct drm_i915_private *dev_priv = to_i915(dev); 14702 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 14703 DRM_INFO("applying inverted panel brightness quirk\n"); 14704 } 14705 14706 /* Some VBT's incorrectly indicate no backlight is present */ 14707 static void quirk_backlight_present(struct drm_device *dev) 14708 { 14709 struct drm_i915_private *dev_priv = to_i915(dev); 14710 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 14711 DRM_INFO("applying backlight present quirk\n"); 14712 } 14713 14714 struct intel_quirk { 14715 int device; 14716 int subsystem_vendor; 14717 int subsystem_device; 14718 void (*hook)(struct drm_device *dev); 14719 }; 14720 14721 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 14722 struct intel_dmi_quirk { 14723 void (*hook)(struct drm_device *dev); 14724 const struct dmi_system_id (*dmi_id_list)[]; 14725 }; 14726 14727 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id) 14728 { 14729 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 14730 return 1; 14731 } 14732 14733 static const struct intel_dmi_quirk intel_dmi_quirks[] = { 14734 { 14735 .dmi_id_list = &(const struct dmi_system_id[]) { 14736 { 14737 .callback = intel_dmi_reverse_brightness, 14738 .ident = "NCR Corporation", 14739 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 14740 DMI_MATCH(DMI_PRODUCT_NAME, ""), 14741 }, 14742 }, 14743 { } /* terminating entry */ 14744 }, 14745 .hook = quirk_invert_brightness, 14746 }, 14747 }; 14748 14749 static struct intel_quirk intel_quirks[] = { 14750 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 14751 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 14752 14753 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 14754 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 14755 14756 /* 830 needs to leave pipe A & dpll A up */ 14757 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 14758 14759 /* 830 
needs to leave pipe B & dpll B up */ 14760 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force }, 14761 14762 /* Lenovo U160 cannot use SSC on LVDS */ 14763 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 14764 14765 /* Sony Vaio Y cannot use SSC on LVDS */ 14766 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 14767 14768 /* Acer Aspire 5734Z must invert backlight brightness */ 14769 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 14770 14771 /* Acer/eMachines G725 */ 14772 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 14773 14774 /* Acer/eMachines e725 */ 14775 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 14776 14777 /* Acer/Packard Bell NCL20 */ 14778 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 14779 14780 /* Acer Aspire 4736Z */ 14781 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 14782 14783 /* Acer Aspire 5336 */ 14784 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 14785 14786 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ 14787 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 14788 14789 /* Acer C720 Chromebook (Core i3 4005U) */ 14790 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 14791 14792 /* Apple Macbook 2,1 (Core 2 T7400) */ 14793 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 14794 14795 /* Apple Macbook 4,1 */ 14796 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present }, 14797 14798 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 14799 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 14800 14801 /* HP Chromebook 14 (Celeron 2955U) */ 14802 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 14803 14804 /* Dell Chromebook 11 */ 14805 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 14806 14807 /* Dell Chromebook 11 (2015 version) */ 14808 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present }, 14809 }; 14810 14811 static void intel_init_quirks(struct drm_device *dev) 14812 { 14813 struct pci_dev *d = dev->pdev; 14814 int i; 14815 14816 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 14817 struct intel_quirk *q = &intel_quirks[i]; 14818 14819 if (d->device == q->device && 14820 (d->subsystem_vendor == q->subsystem_vendor || 14821 q->subsystem_vendor == PCI_ANY_ID) && 14822 (d->subsystem_device == q->subsystem_device || 14823 q->subsystem_device == PCI_ANY_ID)) 14824 q->hook(dev); 14825 } 14826 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 14827 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 14828 intel_dmi_quirks[i].hook(dev); 14829 } 14830 } 14831 14832 /* Disable the VGA plane that we never use */ 14833 static void i915_disable_vga(struct drm_i915_private *dev_priv) 14834 { 14835 struct pci_dev *pdev = dev_priv->drm.pdev; 14836 u8 sr1; 14837 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 14838 14839 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 14840 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); 14841 outb(SR01, VGA_SR_INDEX); 14842 sr1 = inb(VGA_SR_DATA); 14843 outb(sr1 | 1<<5, VGA_SR_DATA); 14844 vga_put(pdev, VGA_RSRC_LEGACY_IO); 14845 udelay(300); 14846 14847 I915_WRITE(vga_reg, VGA_DISP_DISABLE); 14848 POSTING_READ(vga_reg); 14849 } 14850 14851 void intel_modeset_init_hw(struct drm_device *dev) 14852 { 14853 struct drm_i915_private *dev_priv = to_i915(dev); 14854 14855 intel_update_cdclk(dev_priv); 14856 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; 14857 14858 intel_init_clock_gating(dev_priv); 14859 } 14860 14861 /* 14862 * Calculate what we think the watermarks should be for the state we've read 14863 * 
out of the hardware and then immediately program those watermarks so that 14864 * we ensure the hardware settings match our internal state. 14865 * 14866 * We can calculate what we think WM's should be by creating a duplicate of the 14867 * current state (which was constructed during hardware readout) and running it 14868 * through the atomic check code to calculate new watermark values in the 14869 * state object. 14870 */ 14871 static void sanitize_watermarks(struct drm_device *dev) 14872 { 14873 struct drm_i915_private *dev_priv = to_i915(dev); 14874 struct drm_atomic_state *state; 14875 struct intel_atomic_state *intel_state; 14876 struct drm_crtc *crtc; 14877 struct drm_crtc_state *cstate; 14878 struct drm_modeset_acquire_ctx ctx; 14879 int ret; 14880 int i; 14881 14882 /* Only supported on platforms that use atomic watermark design */ 14883 if (!dev_priv->display.optimize_watermarks) 14884 return; 14885 14886 /* 14887 * We need to hold connection_mutex before calling duplicate_state so 14888 * that the connector loop is protected. 14889 */ 14890 drm_modeset_acquire_init(&ctx, 0); 14891 retry: 14892 ret = drm_modeset_lock_all_ctx(dev, &ctx); 14893 if (ret == -EDEADLK) { 14894 drm_modeset_backoff(&ctx); 14895 goto retry; 14896 } else if (WARN_ON(ret)) { 14897 goto fail; 14898 } 14899 14900 state = drm_atomic_helper_duplicate_state(dev, &ctx); 14901 if (WARN_ON(IS_ERR(state))) 14902 goto fail; 14903 14904 intel_state = to_intel_atomic_state(state); 14905 14906 /* 14907 * Hardware readout is the only time we don't want to calculate 14908 * intermediate watermarks (since we don't trust the current 14909 * watermarks). 14910 */ 14911 if (!HAS_GMCH_DISPLAY(dev_priv)) 14912 intel_state->skip_intermediate_wm = true; 14913 14914 ret = intel_atomic_check(dev, state); 14915 if (ret) { 14916 /* 14917 * If we fail here, it means that the hardware appears to be 14918 * programmed in a way that shouldn't be possible, given our 14919 * understanding of watermark requirements. This might mean a 14920 * mistake in the hardware readout code or a mistake in the 14921 * watermark calculations for a given platform. Raise a WARN 14922 * so that this is noticeable. 14923 * 14924 * If this actually happens, we'll have to just leave the 14925 * BIOS-programmed watermarks untouched and hope for the best. 
14926 */ 14927 WARN(true, "Could not determine valid watermarks for inherited state\n"); 14928 goto put_state; 14929 } 14930 14931 /* Write calculated watermark values back */ 14932 for_each_new_crtc_in_state(state, crtc, cstate, i) { 14933 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 14934 14935 cs->wm.need_postvbl_update = true; 14936 dev_priv->display.optimize_watermarks(intel_state, cs); 14937 } 14938 14939 put_state: 14940 drm_atomic_state_put(state); 14941 fail: 14942 drm_modeset_drop_locks(&ctx); 14943 drm_modeset_acquire_fini(&ctx); 14944 } 14945 14946 int intel_modeset_init(struct drm_device *dev) 14947 { 14948 struct drm_i915_private *dev_priv = to_i915(dev); 14949 struct i915_ggtt *ggtt = &dev_priv->ggtt; 14950 enum i915_pipe pipe; 14951 struct intel_crtc *crtc; 14952 14953 drm_mode_config_init(dev); 14954 14955 dev->mode_config.min_width = 0; 14956 dev->mode_config.min_height = 0; 14957 14958 dev->mode_config.preferred_depth = 24; 14959 dev->mode_config.prefer_shadow = 1; 14960 14961 dev->mode_config.allow_fb_modifiers = true; 14962 14963 dev->mode_config.funcs = &intel_mode_funcs; 14964 14965 INIT_WORK(&dev_priv->atomic_helper.free_work, 14966 intel_atomic_helper_free_state_worker); 14967 14968 intel_init_quirks(dev); 14969 14970 intel_init_pm(dev_priv); 14971 14972 if (INTEL_INFO(dev_priv)->num_pipes == 0) 14973 return 0; 14974 14975 /* 14976 * There may be no VBT; and if the BIOS enabled SSC we can 14977 * just keep using it to avoid unnecessary flicker. Whereas if the 14978 * BIOS isn't using it, don't assume it will work even if the VBT 14979 * indicates as much. 14980 */ 14981 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { 14982 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 14983 DREF_SSC1_ENABLE); 14984 14985 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 14986 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 14987 bios_lvds_use_ssc ? "en" : "dis", 14988 dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); 14989 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 14990 } 14991 } 14992 14993 if (IS_GEN2(dev_priv)) { 14994 dev->mode_config.max_width = 2048; 14995 dev->mode_config.max_height = 2048; 14996 } else if (IS_GEN3(dev_priv)) { 14997 dev->mode_config.max_width = 4096; 14998 dev->mode_config.max_height = 4096; 14999 } else { 15000 dev->mode_config.max_width = 8192; 15001 dev->mode_config.max_height = 8192; 15002 } 15003 15004 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { 15005 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; 15006 dev->mode_config.cursor_height = 1023; 15007 } else if (IS_GEN2(dev_priv)) { 15008 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15009 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15010 } else { 15011 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15012 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15013 } 15014 15015 dev->mode_config.fb_base = ggtt->mappable_base; 15016 15017 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15018 INTEL_INFO(dev_priv)->num_pipes, 15019 INTEL_INFO(dev_priv)->num_pipes > 1 ? 
"s" : ""); 15020 15021 for_each_pipe(dev_priv, pipe) { 15022 int ret; 15023 15024 ret = intel_crtc_init(dev_priv, pipe); 15025 if (ret) { 15026 drm_mode_config_cleanup(dev); 15027 return ret; 15028 } 15029 } 15030 15031 intel_shared_dpll_init(dev); 15032 15033 intel_update_czclk(dev_priv); 15034 intel_modeset_init_hw(dev); 15035 15036 if (dev_priv->max_cdclk_freq == 0) 15037 intel_update_max_cdclk(dev_priv); 15038 15039 /* Just disable it once at startup */ 15040 i915_disable_vga(dev_priv); 15041 intel_setup_outputs(dev_priv); 15042 15043 drm_modeset_lock_all(dev); 15044 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); 15045 drm_modeset_unlock_all(dev); 15046 15047 for_each_intel_crtc(dev, crtc) { 15048 struct intel_initial_plane_config plane_config = {}; 15049 15050 if (!crtc->active) 15051 continue; 15052 15053 /* 15054 * Note that reserving the BIOS fb up front prevents us 15055 * from stuffing other stolen allocations like the ring 15056 * on top. This prevents some ugliness at boot time, and 15057 * can even allow for smooth boot transitions if the BIOS 15058 * fb is large enough for the active pipe configuration. 15059 */ 15060 dev_priv->display.get_initial_plane_config(crtc, 15061 &plane_config); 15062 15063 /* 15064 * If the fb is shared between multiple heads, we'll 15065 * just get the first one. 15066 */ 15067 intel_find_initial_plane_obj(crtc, &plane_config); 15068 } 15069 15070 /* 15071 * Make sure hardware watermarks really match the state we read out. 15072 * Note that we need to do this after reconstructing the BIOS fb's 15073 * since the watermark calculation done here will use pstate->fb. 15074 */ 15075 if (!HAS_GMCH_DISPLAY(dev_priv)) 15076 sanitize_watermarks(dev); 15077 15078 return 0; 15079 } 15080 15081 static void intel_enable_pipe_a(struct drm_device *dev, 15082 struct drm_modeset_acquire_ctx *ctx) 15083 { 15084 struct intel_connector *connector; 15085 struct drm_connector_list_iter conn_iter; 15086 struct drm_connector *crt = NULL; 15087 struct intel_load_detect_pipe load_detect_temp; 15088 int ret; 15089 15090 /* We can't just switch on the pipe A, we need to set things up with a 15091 * proper mode and output configuration. As a gross hack, enable pipe A 15092 * by enabling the load detect pipe once. 
*/ 15093 drm_connector_list_iter_begin(dev, &conn_iter); 15094 for_each_intel_connector_iter(connector, &conn_iter) { 15095 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 15096 crt = &connector->base; 15097 break; 15098 } 15099 } 15100 drm_connector_list_iter_end(&conn_iter); 15101 15102 if (!crt) 15103 return; 15104 15105 ret = intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx); 15106 WARN(ret < 0, "All modeset mutexes are locked, but intel_get_load_detect_pipe failed\n"); 15107 15108 if (ret > 0) 15109 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15110 } 15111 15112 static bool 15113 intel_check_plane_mapping(struct intel_crtc *crtc) 15114 { 15115 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 15116 u32 val; 15117 15118 if (INTEL_INFO(dev_priv)->num_pipes == 1) 15119 return true; 15120 15121 val = I915_READ(DSPCNTR(!crtc->plane)); 15122 15123 if ((val & DISPLAY_PLANE_ENABLE) && 15124 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 15125 return false; 15126 15127 return true; 15128 } 15129 15130 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 15131 { 15132 struct drm_device *dev = crtc->base.dev; 15133 struct intel_encoder *encoder; 15134 15135 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15136 return true; 15137 15138 return false; 15139 } 15140 15141 static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder) 15142 { 15143 struct drm_device *dev = encoder->base.dev; 15144 struct intel_connector *connector; 15145 15146 for_each_connector_on_encoder(dev, &encoder->base, connector) 15147 return connector; 15148 15149 return NULL; 15150 } 15151 15152 static bool has_pch_trancoder(struct drm_i915_private *dev_priv, 15153 enum transcoder pch_transcoder) 15154 { 15155 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 15156 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == TRANSCODER_A); 15157 } 15158 15159 static void intel_sanitize_crtc(struct intel_crtc *crtc, 15160 struct drm_modeset_acquire_ctx *ctx) 15161 { 15162 struct drm_device *dev = crtc->base.dev; 15163 struct drm_i915_private *dev_priv = to_i915(dev); 15164 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 15165 15166 /* Clear any frame start delays used for debugging left by the BIOS */ 15167 if (!transcoder_is_dsi(cpu_transcoder)) { 15168 i915_reg_t reg = PIPECONF(cpu_transcoder); 15169 15170 I915_WRITE(reg, 15171 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 15172 } 15173 15174 /* restore vblank interrupts to correct state */ 15175 drm_crtc_vblank_reset(&crtc->base); 15176 if (crtc->active) { 15177 struct intel_plane *plane; 15178 15179 drm_crtc_vblank_on(&crtc->base); 15180 15181 /* Disable everything but the primary plane */ 15182 for_each_intel_plane_on_crtc(dev, crtc, plane) { 15183 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) 15184 continue; 15185 15186 trace_intel_disable_plane(&plane->base, crtc); 15187 plane->disable_plane(&plane->base, &crtc->base); 15188 } 15189 } 15190 15191 /* We need to sanitize the plane -> pipe mapping first because this will 15192 * disable the crtc (and hence change the state) if it is wrong. Note 15193 * that gen4+ has a fixed plane -> pipe mapping. */ 15194 if (INTEL_GEN(dev_priv) < 4 && !intel_check_plane_mapping(crtc)) { 15195 bool plane; 15196 15197 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n", 15198 crtc->base.base.id, crtc->base.name); 15199 15200 /* Pipe has the wrong plane attached and the plane is active. 
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		crtc->base.primary->state->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base, ctx);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this; in the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev, ctx);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base, ctx);

	if (crtc->active || HAS_GMCH_DISPLAY(dev_priv)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we don't have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		/*
		 * We track the PCH transcoder underrun reporting state
		 * within the crtc. With crtc for pipe A housing the underrun
		 * reporting state for PCH transcoder A, crtc for pipe B housing
		 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
		 * and marking underrun reporting as disabled for the non-existing
		 * PCH transcoders B and C would prevent enabling the south
		 * error interrupt (see cpt_can_enable_serr_int()).
		 */
		if (has_pch_trancoder(dev_priv, (enum transcoder)crtc->pipe))
			crtc->pch_fifo_underrun_disabled = true;
	}
}

static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	connector = intel_encoder_find_connector(encoder);
	if (connector && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
*/ 15272 if (encoder->base.crtc) { 15273 struct drm_crtc_state *crtc_state = encoder->base.crtc->state; 15274 15275 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 15276 encoder->base.base.id, 15277 encoder->base.name); 15278 encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15279 if (encoder->post_disable) 15280 encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); 15281 } 15282 encoder->base.crtc = NULL; 15283 15284 /* Inconsistent output/port/pipe state happens presumably due to 15285 * a bug in one of the get_hw_state functions. Or someplace else 15286 * in our code, like the register restore mess on resume. Clamp 15287 * things to off as a safer default. */ 15288 15289 connector->base.dpms = DRM_MODE_DPMS_OFF; 15290 connector->base.encoder = NULL; 15291 } 15292 /* Enabled encoders without active connectors will be fixed in 15293 * the crtc fixup. */ 15294 } 15295 15296 void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) 15297 { 15298 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); 15299 15300 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15301 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 15302 i915_disable_vga(dev_priv); 15303 } 15304 } 15305 15306 void i915_redisable_vga(struct drm_i915_private *dev_priv) 15307 { 15308 /* This function can be called both from intel_modeset_setup_hw_state or 15309 * at a very early point in our resume sequence, where the power well 15310 * structures are not yet restored. Since this function is at a very 15311 * paranoid "someone might have enabled VGA while we were not looking" 15312 * level, just check if the power well is enabled instead of trying to 15313 * follow the "don't touch the power well if we don't need it" policy 15314 * the rest of the driver uses. 
*/ 15315 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA)) 15316 return; 15317 15318 i915_redisable_vga_power_on(dev_priv); 15319 15320 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA); 15321 } 15322 15323 static bool primary_get_hw_state(struct intel_plane *plane) 15324 { 15325 struct drm_i915_private *dev_priv = to_i915(plane->base.dev); 15326 15327 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE; 15328 } 15329 15330 /* FIXME read out full plane state for all planes */ 15331 static void readout_plane_state(struct intel_crtc *crtc) 15332 { 15333 struct intel_plane *primary = to_intel_plane(crtc->base.primary); 15334 bool visible; 15335 15336 visible = crtc->active && primary_get_hw_state(primary); 15337 15338 intel_set_plane_visible(to_intel_crtc_state(crtc->base.state), 15339 to_intel_plane_state(primary->base.state), 15340 visible); 15341 } 15342 15343 static void intel_modeset_readout_hw_state(struct drm_device *dev) 15344 { 15345 struct drm_i915_private *dev_priv = to_i915(dev); 15346 enum i915_pipe pipe; 15347 struct intel_crtc *crtc; 15348 struct intel_encoder *encoder; 15349 struct intel_connector *connector; 15350 struct drm_connector_list_iter conn_iter; 15351 int i; 15352 15353 dev_priv->active_crtcs = 0; 15354 15355 for_each_intel_crtc(dev, crtc) { 15356 struct intel_crtc_state *crtc_state = 15357 to_intel_crtc_state(crtc->base.state); 15358 15359 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base); 15360 memset(crtc_state, 0, sizeof(*crtc_state)); 15361 crtc_state->base.crtc = &crtc->base; 15362 15363 crtc_state->base.active = crtc_state->base.enable = 15364 dev_priv->display.get_pipe_config(crtc, crtc_state); 15365 15366 crtc->base.enabled = crtc_state->base.enable; 15367 crtc->active = crtc_state->base.active; 15368 15369 if (crtc_state->base.active) 15370 dev_priv->active_crtcs |= 1 << crtc->pipe; 15371 15372 readout_plane_state(crtc); 15373 15374 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", 15375 crtc->base.base.id, crtc->base.name, 15376 enableddisabled(crtc_state->base.active)); 15377 } 15378 15379 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 15380 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 15381 15382 pll->on = pll->funcs.get_hw_state(dev_priv, pll, 15383 &pll->state.hw_state); 15384 pll->state.crtc_mask = 0; 15385 for_each_intel_crtc(dev, crtc) { 15386 struct intel_crtc_state *crtc_state = 15387 to_intel_crtc_state(crtc->base.state); 15388 15389 if (crtc_state->base.active && 15390 crtc_state->shared_dpll == pll) 15391 pll->state.crtc_mask |= 1 << crtc->pipe; 15392 } 15393 pll->active_mask = pll->state.crtc_mask; 15394 15395 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 15396 pll->name, pll->state.crtc_mask, pll->on); 15397 } 15398 15399 for_each_intel_encoder(dev, encoder) { 15400 pipe = 0; 15401 15402 if (encoder->get_hw_state(encoder, &pipe)) { 15403 struct intel_crtc_state *crtc_state; 15404 15405 crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 15406 crtc_state = to_intel_crtc_state(crtc->base.state); 15407 15408 encoder->base.crtc = &crtc->base; 15409 crtc_state->output_types |= 1 << encoder->type; 15410 encoder->get_config(encoder, crtc_state); 15411 } else { 15412 encoder->base.crtc = NULL; 15413 } 15414 15415 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 15416 encoder->base.base.id, encoder->base.name, 15417 enableddisabled(encoder->base.crtc), 15418 pipe_name(pipe)); 15419 } 15420 15421 drm_connector_list_iter_begin(dev, &conn_iter); 15422 
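	/*
	 * Read out the HW state of each connector: connectors the hardware
	 * reports as active are marked DPMS_ON and contribute to the
	 * connector/encoder masks of the crtc state they drive; inactive
	 * ones are clamped to DPMS_OFF with no encoder link.
	 */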
for_each_intel_connector_iter(connector, &conn_iter) { 15423 if (connector->get_hw_state(connector)) { 15424 connector->base.dpms = DRM_MODE_DPMS_ON; 15425 15426 encoder = connector->encoder; 15427 connector->base.encoder = &encoder->base; 15428 15429 if (encoder->base.crtc && 15430 encoder->base.crtc->state->active) { 15431 /* 15432 * This has to be done during hardware readout 15433 * because anything calling .crtc_disable may 15434 * rely on the connector_mask being accurate. 15435 */ 15436 encoder->base.crtc->state->connector_mask |= 15437 1 << drm_connector_index(&connector->base); 15438 encoder->base.crtc->state->encoder_mask |= 15439 1 << drm_encoder_index(&encoder->base); 15440 } 15441 15442 } else { 15443 connector->base.dpms = DRM_MODE_DPMS_OFF; 15444 connector->base.encoder = NULL; 15445 } 15446 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 15447 connector->base.base.id, connector->base.name, 15448 enableddisabled(connector->base.encoder)); 15449 } 15450 drm_connector_list_iter_end(&conn_iter); 15451 15452 for_each_intel_crtc(dev, crtc) { 15453 struct intel_crtc_state *crtc_state = 15454 to_intel_crtc_state(crtc->base.state); 15455 int pixclk = 0; 15456 15457 crtc->base.hwmode = crtc_state->base.adjusted_mode; 15458 15459 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); 15460 if (crtc_state->base.active) { 15461 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); 15462 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); 15463 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); 15464 15465 /* 15466 * The initial mode needs to be set in order to keep 15467 * the atomic core happy. It wants a valid mode if the 15468 * crtc's enabled, so we do the above call. 15469 * 15470 * But we don't set all the derived state fully, hence 15471 * set a flag to indicate that a full recalculation is 15472 * needed on the next commit. 
15473 */ 15474 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED; 15475 15476 intel_crtc_compute_pixel_rate(crtc_state); 15477 15478 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv) || 15479 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 15480 pixclk = crtc_state->pixel_rate; 15481 else 15482 WARN_ON(dev_priv->display.modeset_calc_cdclk); 15483 15484 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ 15485 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled) 15486 pixclk = DIV_ROUND_UP(pixclk * 100, 95); 15487 15488 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode); 15489 update_scanline_offset(crtc); 15490 } 15491 15492 dev_priv->min_pixclk[crtc->pipe] = pixclk; 15493 15494 intel_pipe_config_sanity_check(dev_priv, crtc_state); 15495 } 15496 } 15497 15498 static void 15499 get_encoder_power_domains(struct drm_i915_private *dev_priv) 15500 { 15501 struct intel_encoder *encoder; 15502 15503 for_each_intel_encoder(&dev_priv->drm, encoder) { 15504 u64 get_domains; 15505 enum intel_display_power_domain domain; 15506 15507 if (!encoder->get_power_domains) 15508 continue; 15509 15510 get_domains = encoder->get_power_domains(encoder); 15511 for_each_power_domain(domain, get_domains) 15512 intel_display_power_get(dev_priv, domain); 15513 } 15514 } 15515 15516 /* Scan out the current hw modeset state, 15517 * and sanitizes it to the current state 15518 */ 15519 static void 15520 intel_modeset_setup_hw_state(struct drm_device *dev, 15521 struct drm_modeset_acquire_ctx *ctx) 15522 { 15523 struct drm_i915_private *dev_priv = to_i915(dev); 15524 enum i915_pipe pipe; 15525 struct intel_crtc *crtc; 15526 struct intel_encoder *encoder; 15527 int i; 15528 15529 intel_modeset_readout_hw_state(dev); 15530 15531 /* HW state is read out, now we need to sanitize this mess. 
 */
	get_encoder_power_domains(dev_priv);

	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = intel_get_crtc_for_pipe(dev_priv, pipe);

		intel_sanitize_crtc(crtc, ctx);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		vlv_wm_get_hw_state(dev);
		vlv_wm_sanitize(dev_priv);
	} else if (IS_GEN9(dev_priv)) {
		skl_wm_get_hw_state(dev);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		ilk_wm_get_hw_state(dev);
	}

	for_each_intel_crtc(dev, crtc) {
		u64 put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_power_domains_verify_state(dev_priv);

	intel_fbc_init_pipe_state(dev_priv);
}

void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	dev_priv->modeset_restore_state = NULL;
	if (state)
		state->acquire_ctx = &ctx;

	/*
	 * This is a kludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state, &ctx);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	if (state)
		drm_atomic_state_put(state);
}

void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_init_gt_powersave(dev_priv);

	intel_setup_overlay(dev_priv);
}

int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		goto err;

	return 0;

err:
	return ret;
}

void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}

void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Deal with interrupts and polling first to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev_priv);
}

void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
{
	unsigned reg = INTEL_GEN(dev_priv) >= 6 ?
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 15708 u16 gmch_ctrl; 15709 15710 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { 15711 DRM_ERROR("failed to read control word\n"); 15712 return -EIO; 15713 } 15714 15715 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) 15716 return 0; 15717 15718 if (state) 15719 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; 15720 else 15721 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; 15722 15723 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { 15724 DRM_ERROR("failed to write control word\n"); 15725 return -EIO; 15726 } 15727 15728 return 0; 15729 } 15730 15731 #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) 15732 15733 struct intel_display_error_state { 15734 15735 u32 power_well_driver; 15736 15737 int num_transcoders; 15738 15739 struct intel_cursor_error_state { 15740 u32 control; 15741 u32 position; 15742 u32 base; 15743 u32 size; 15744 } cursor[I915_MAX_PIPES]; 15745 15746 struct intel_pipe_error_state { 15747 bool power_domain_on; 15748 u32 source; 15749 u32 stat; 15750 } pipe[I915_MAX_PIPES]; 15751 15752 struct intel_plane_error_state { 15753 u32 control; 15754 u32 stride; 15755 u32 size; 15756 u32 pos; 15757 u32 addr; 15758 u32 surface; 15759 u32 tile_offset; 15760 } plane[I915_MAX_PIPES]; 15761 15762 struct intel_transcoder_error_state { 15763 bool power_domain_on; 15764 enum transcoder cpu_transcoder; 15765 15766 u32 conf; 15767 15768 u32 htotal; 15769 u32 hblank; 15770 u32 hsync; 15771 u32 vtotal; 15772 u32 vblank; 15773 u32 vsync; 15774 } transcoder[4]; 15775 }; 15776 15777 struct intel_display_error_state * 15778 intel_display_capture_error_state(struct drm_i915_private *dev_priv) 15779 { 15780 struct intel_display_error_state *error; 15781 int transcoders[] = { 15782 TRANSCODER_A, 15783 TRANSCODER_B, 15784 TRANSCODER_C, 15785 TRANSCODER_EDP, 15786 }; 15787 int i; 15788 15789 if (INTEL_INFO(dev_priv)->num_pipes == 0) 15790 return NULL; 15791 15792 error = kzalloc(sizeof(*error), GFP_ATOMIC); 15793 if (error == NULL) 15794 return NULL; 15795 15796 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15797 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 15798 15799 for_each_pipe(dev_priv, i) { 15800 error->pipe[i].power_domain_on = 15801 __intel_display_power_is_enabled(dev_priv, 15802 POWER_DOMAIN_PIPE(i)); 15803 if (!error->pipe[i].power_domain_on) 15804 continue; 15805 15806 error->cursor[i].control = I915_READ(CURCNTR(i)); 15807 error->cursor[i].position = I915_READ(CURPOS(i)); 15808 error->cursor[i].base = I915_READ(CURBASE(i)); 15809 15810 error->plane[i].control = I915_READ(DSPCNTR(i)); 15811 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 15812 if (INTEL_GEN(dev_priv) <= 3) { 15813 error->plane[i].size = I915_READ(DSPSIZE(i)); 15814 error->plane[i].pos = I915_READ(DSPPOS(i)); 15815 } 15816 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15817 error->plane[i].addr = I915_READ(DSPADDR(i)); 15818 if (INTEL_GEN(dev_priv) >= 4) { 15819 error->plane[i].surface = I915_READ(DSPSURF(i)); 15820 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 15821 } 15822 15823 error->pipe[i].source = I915_READ(PIPESRC(i)); 15824 15825 if (HAS_GMCH_DISPLAY(dev_priv)) 15826 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 15827 } 15828 15829 /* Note: this does not include DSI transcoders. */ 15830 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes; 15831 if (HAS_DDI(dev_priv)) 15832 error->num_transcoders++; /* Account for eDP. 
*/ 15833 15834 for (i = 0; i < error->num_transcoders; i++) { 15835 enum transcoder cpu_transcoder = transcoders[i]; 15836 15837 error->transcoder[i].power_domain_on = 15838 __intel_display_power_is_enabled(dev_priv, 15839 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15840 if (!error->transcoder[i].power_domain_on) 15841 continue; 15842 15843 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15844 15845 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15846 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15847 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15848 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15849 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15850 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15851 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15852 } 15853 15854 return error; 15855 } 15856 15857 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15858 15859 void 15860 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15861 struct intel_display_error_state *error) 15862 { 15863 struct drm_i915_private *dev_priv = m->i915; 15864 int i; 15865 15866 if (!error) 15867 return; 15868 15869 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); 15870 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 15871 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15872 error->power_well_driver); 15873 for_each_pipe(dev_priv, i) { 15874 err_printf(m, "Pipe [%d]:\n", i); 15875 err_printf(m, " Power: %s\n", 15876 onoff(error->pipe[i].power_domain_on)); 15877 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15878 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15879 15880 err_printf(m, "Plane [%d]:\n", i); 15881 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15882 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15883 if (INTEL_GEN(dev_priv) <= 3) { 15884 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15885 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15886 } 15887 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv)) 15888 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15889 if (INTEL_GEN(dev_priv) >= 4) { 15890 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15891 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15892 } 15893 15894 err_printf(m, "Cursor [%d]:\n", i); 15895 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15896 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15897 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15898 } 15899 15900 for (i = 0; i < error->num_transcoders; i++) { 15901 err_printf(m, "CPU transcoder: %s\n", 15902 transcoder_name(error->transcoder[i].cpu_transcoder)); 15903 err_printf(m, " Power: %s\n", 15904 onoff(error->transcoder[i].power_domain_on)); 15905 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15906 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15907 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15908 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15909 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15910 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15911 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15912 } 15913 } 15914 15915 #endif 15916