1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_dmabuf.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>

/*
 * A flip work item is driven either by MMIO or by a queued request; a
 * non-NULL mmio_work.func marks it as the MMIO variety.
 */
static bool is_mmio_work(struct intel_flip_work *work)
{
	return work->mmio_work.func;
}

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);

/*
 * Per-platform DPLL divider limits; each {min, max} pair bounds one
 * divider of the clock equation, and p2 selects slow/fast post-divider
 * based on the dot clock threshold.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}

/*
 * Read a CCK clock divider register and derive the resulting clock rate
 * from @ref_freq; warns if a divider change is still in flight.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}

/* As vlv_get_cck_clock(), with the HPLL VCO as reference (cached lazily). */
static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}

/* Cache the platform's raw clock frequency (kHz) in dev_priv. */
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

/* Cache the CZ clock frequency (kHz); only exists on VLV/CHV. */
static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

/* Hardware-derived DPLL divider limit tables, one per platform/output combo. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* Thin wrapper so the rest of the file reads naturally. */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

/* Effective M divider for gen2-4 style PLLs: 5*(m1+2) + (m2+2). */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

/* Fill in m/p/vco/dot from the raw dividers; returns the dot clock in kHz. */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

/* VLV variant: dot is a fast clock, so the pipe rate is dot/5. */
static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

/* CHV variant: m2 carries a 22-bit fractional part, hence the 64-bit math. */
int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 only required where m1/m2 are real, separate dividers */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* combined m/p limits don't exist on VLV/CHV/BXT */
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/* Pick the slow or fast p2 post-divider for the given target dot clock. */
static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
					clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE.  The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
784 */ 785 static bool 786 g4x_find_best_dpll(const struct intel_limit *limit, 787 struct intel_crtc_state *crtc_state, 788 int target, int refclk, struct dpll *match_clock, 789 struct dpll *best_clock) 790 { 791 struct drm_device *dev = crtc_state->base.crtc->dev; 792 struct dpll clock; 793 int max_n; 794 bool found = false; 795 /* approximately equals target * 0.00585 */ 796 int err_most = (target >> 8) + (target >> 9); 797 798 memset(best_clock, 0, sizeof(*best_clock)); 799 800 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 801 802 max_n = limit->n.max; 803 /* based on hardware requirement, prefer smaller n to precision */ 804 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 805 /* based on hardware requirement, prefere larger m1,m2 */ 806 for (clock.m1 = limit->m1.max; 807 clock.m1 >= limit->m1.min; clock.m1--) { 808 for (clock.m2 = limit->m2.max; 809 clock.m2 >= limit->m2.min; clock.m2--) { 810 for (clock.p1 = limit->p1.max; 811 clock.p1 >= limit->p1.min; clock.p1--) { 812 int this_err; 813 814 i9xx_calc_dpll_params(refclk, &clock); 815 if (!intel_PLL_is_valid(dev, limit, 816 &clock)) 817 continue; 818 819 this_err = abs(clock.dot - target); 820 if (this_err < err_most) { 821 *best_clock = clock; 822 err_most = this_err; 823 max_n = clock.n; 824 found = true; 825 } 826 } 827 } 828 } 829 } 830 return found; 831 } 832 833 /* 834 * Check if the calculated PLL configuration is more optimal compared to the 835 * best configuration and error found so far. Return the calculated error. 836 */ 837 static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 838 const struct dpll *calculated_clock, 839 const struct dpll *best_clock, 840 unsigned int best_error_ppm, 841 unsigned int *error_ppm) 842 { 843 /* 844 * For CHV ignore the error and consider only the P value. 845 * Prefer a bigger P value based on HW requirements. 
846 */ 847 if (IS_CHERRYVIEW(dev)) { 848 *error_ppm = 0; 849 850 return calculated_clock->p > best_clock->p; 851 } 852 853 if (WARN_ON_ONCE(!target_freq)) 854 return false; 855 856 *error_ppm = div_u64(1000000ULL * 857 abs(target_freq - calculated_clock->dot), 858 target_freq); 859 /* 860 * Prefer a better P value over a better (smaller) error if the error 861 * is small. Ensure this preference for future configurations too by 862 * setting the error to 0. 863 */ 864 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 865 *error_ppm = 0; 866 867 return true; 868 } 869 870 return *error_ppm + 10 < best_error_ppm; 871 } 872 873 /* 874 * Returns a set of divisors for the desired target clock with the given 875 * refclk, or FALSE. The returned values represent the clock equation: 876 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 877 */ 878 static bool 879 vlv_find_best_dpll(const struct intel_limit *limit, 880 struct intel_crtc_state *crtc_state, 881 int target, int refclk, struct dpll *match_clock, 882 struct dpll *best_clock) 883 { 884 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 885 struct drm_device *dev = crtc->base.dev; 886 struct dpll clock; 887 unsigned int bestppm = 1000000; 888 /* min update 19.2 MHz */ 889 int max_n = min(limit->n.max, refclk / 19200); 890 bool found = false; 891 892 target *= 5; /* fast clock */ 893 894 memset(best_clock, 0, sizeof(*best_clock)); 895 896 /* based on hardware requirement, prefer smaller n to precision */ 897 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 898 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 899 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 900 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 901 clock.p = clock.p1 * clock.p2; 902 /* based on hardware requirement, prefer bigger m1,m2 values */ 903 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 904 unsigned int ppm; 905 906 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 907 refclk * clock.m1); 908 909 vlv_calc_dpll_params(refclk, &clock); 910 911 if (!intel_PLL_is_valid(dev, limit, 912 &clock)) 913 continue; 914 915 if (!vlv_PLL_is_optimal(dev, target, 916 &clock, 917 best_clock, 918 bestppm, &ppm)) 919 continue; 920 921 *best_clock = clock; 922 bestppm = ppm; 923 found = true; 924 } 925 } 926 } 927 } 928 929 return found; 930 } 931 932 /* 933 * Returns a set of divisors for the desired target clock with the given 934 * refclk, or FALSE. The returned values represent the clock equation: 935 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 936 */ 937 static bool 938 chv_find_best_dpll(const struct intel_limit *limit, 939 struct intel_crtc_state *crtc_state, 940 int target, int refclk, struct dpll *match_clock, 941 struct dpll *best_clock) 942 { 943 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 944 struct drm_device *dev = crtc->base.dev; 945 unsigned int best_error_ppm; 946 struct dpll clock; 947 uint64_t m2; 948 int found = false; 949 950 memset(best_clock, 0, sizeof(*best_clock)); 951 best_error_ppm = 1000000; 952 953 /* 954 * Based on hardware doc, the n always set to 1, and m1 always 955 * set to 2. If requires to support 200Mhz refclk, we need to 956 * revisit this because n may not 1 anymore. 957 */ 958 clock.n = 1, clock.m1 = 2; 959 target *= 5; /* fast clock */ 960 961 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 962 for (clock.p2 = limit->p2.p2_fast; 963 clock.p2 >= limit->p2.p2_slow; 964 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 965 unsigned int error_ppm; 966 967 clock.p = clock.p1 * clock.p2; 968 969 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 970 clock.n) << 22, refclk * clock.m1); 971 972 if (m2 > INT_MAX/clock.m1) 973 continue; 974 975 clock.m2 = m2; 976 977 chv_calc_dpll_params(refclk, &clock); 978 979 if (!intel_PLL_is_valid(dev, limit, &clock)) 980 continue; 981 982 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 983 best_error_ppm, &error_ppm)) 984 continue; 985 986 *best_clock = clock; 987 best_error_ppm = error_ppm; 988 found = true; 989 } 990 } 991 992 return found; 993 } 994 995 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 996 struct dpll *best_clock) 997 { 998 int refclk = 100000; 999 const struct intel_limit *limit = &intel_limits_bxt; 1000 1001 return chv_find_best_dpll(limit, crtc_state, 1002 target_clock, refclk, NULL, best_clock); 1003 } 1004 1005 bool intel_crtc_active(struct drm_crtc *crtc) 1006 { 1007 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1008 1009 /* Be paranoid as we can arrive here with only partial 1010 * state retrieved from the hardware during setup. 1011 * 1012 * We can ditch the adjusted_mode.crtc_clock check as soon 1013 * as Haswell has gained clock readout/fastboot support. 1014 * 1015 * We can ditch the crtc->primary->fb check as soon as we can 1016 * properly reconstruct framebuffers. 1017 * 1018 * FIXME: The intel_crtc->active here should be switched to 1019 * crtc->state->active once we have proper CRTC states wired up 1020 * for atomic. 
1021 */ 1022 return intel_crtc->active && crtc->primary->state->fb && 1023 intel_crtc->config->base.adjusted_mode.crtc_clock; 1024 } 1025 1026 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1027 enum i915_pipe pipe) 1028 { 1029 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1030 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1031 1032 return intel_crtc->config->cpu_transcoder; 1033 } 1034 1035 static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe) 1036 { 1037 struct drm_i915_private *dev_priv = to_i915(dev); 1038 i915_reg_t reg = PIPEDSL(pipe); 1039 u32 line1, line2; 1040 u32 line_mask; 1041 1042 if (IS_GEN2(dev)) 1043 line_mask = DSL_LINEMASK_GEN2; 1044 else 1045 line_mask = DSL_LINEMASK_GEN3; 1046 1047 line1 = I915_READ(reg) & line_mask; 1048 msleep(5); 1049 line2 = I915_READ(reg) & line_mask; 1050 1051 return line1 == line2; 1052 } 1053 1054 /* 1055 * intel_wait_for_pipe_off - wait for pipe to turn off 1056 * @crtc: crtc whose pipe to wait for 1057 * 1058 * After disabling a pipe, we can't wait for vblank in the usual way, 1059 * spinning on the vblank interrupt status bit, since we won't actually 1060 * see an interrupt when the pipe is disabled. 1061 * 1062 * On Gen4 and above: 1063 * wait for the pipe register state bit to turn off 1064 * 1065 * Otherwise: 1066 * wait for the display line value to settle (it usually 1067 * ends up stopping at the start of the next frame). 
1068 * 1069 */ 1070 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1071 { 1072 struct drm_device *dev = crtc->base.dev; 1073 struct drm_i915_private *dev_priv = to_i915(dev); 1074 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1075 enum i915_pipe pipe = crtc->pipe; 1076 1077 if (INTEL_INFO(dev)->gen >= 4) { 1078 i915_reg_t reg = PIPECONF(cpu_transcoder); 1079 1080 /* Wait for the Pipe State to go off */ 1081 if (intel_wait_for_register(dev_priv, 1082 reg, I965_PIPECONF_ACTIVE, 0, 1083 100)) 1084 WARN(1, "pipe_off wait timed out\n"); 1085 } else { 1086 /* Wait for the display line to settle */ 1087 if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) 1088 WARN(1, "pipe_off wait timed out\n"); 1089 } 1090 } 1091 1092 /* Only for pre-ILK configs */ 1093 void assert_pll(struct drm_i915_private *dev_priv, 1094 enum i915_pipe pipe, bool state) 1095 { 1096 u32 val; 1097 bool cur_state; 1098 1099 val = I915_READ(DPLL(pipe)); 1100 cur_state = !!(val & DPLL_VCO_ENABLE); 1101 I915_STATE_WARN(cur_state != state, 1102 "PLL state assertion failure (expected %s, current %s)\n", 1103 onoff(state), onoff(cur_state)); 1104 } 1105 1106 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1107 void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1108 { 1109 u32 val; 1110 bool cur_state; 1111 1112 mutex_lock(&dev_priv->sb_lock); 1113 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1114 mutex_unlock(&dev_priv->sb_lock); 1115 1116 cur_state = val & DSI_PLL_VCO_EN; 1117 I915_STATE_WARN(cur_state != state, 1118 "DSI PLL state assertion failure (expected %s, current %s)\n", 1119 onoff(state), onoff(cur_state)); 1120 } 1121 1122 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1123 enum i915_pipe pipe, bool state) 1124 { 1125 bool cur_state; 1126 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1127 pipe); 1128 1129 if (HAS_DDI(dev_priv)) { 1130 /* DDI does not have a specific FDI_TX register */ 1131 u32 val 
= I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1132 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1133 } else { 1134 u32 val = I915_READ(FDI_TX_CTL(pipe)); 1135 cur_state = !!(val & FDI_TX_ENABLE); 1136 } 1137 I915_STATE_WARN(cur_state != state, 1138 "FDI TX state assertion failure (expected %s, current %s)\n", 1139 onoff(state), onoff(cur_state)); 1140 } 1141 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1142 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1143 1144 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1145 enum i915_pipe pipe, bool state) 1146 { 1147 u32 val; 1148 bool cur_state; 1149 1150 val = I915_READ(FDI_RX_CTL(pipe)); 1151 cur_state = !!(val & FDI_RX_ENABLE); 1152 I915_STATE_WARN(cur_state != state, 1153 "FDI RX state assertion failure (expected %s, current %s)\n", 1154 onoff(state), onoff(cur_state)); 1155 } 1156 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1157 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1158 1159 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1160 enum i915_pipe pipe) 1161 { 1162 u32 val; 1163 1164 /* ILK FDI PLL is always enabled */ 1165 if (IS_GEN5(dev_priv)) 1166 return; 1167 1168 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1169 if (HAS_DDI(dev_priv)) 1170 return; 1171 1172 val = I915_READ(FDI_TX_CTL(pipe)); 1173 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); 1174 } 1175 1176 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1177 enum i915_pipe pipe, bool state) 1178 { 1179 u32 val; 1180 bool cur_state; 1181 1182 val = I915_READ(FDI_RX_CTL(pipe)); 1183 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1184 I915_STATE_WARN(cur_state != state, 1185 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1186 onoff(state), onoff(cur_state)); 1187 } 1188 1189 void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1190 enum 
i915_pipe pipe) 1191 { 1192 struct drm_device *dev = &dev_priv->drm; 1193 i915_reg_t pp_reg; 1194 u32 val; 1195 enum i915_pipe panel_pipe = PIPE_A; 1196 bool locked = true; 1197 1198 if (WARN_ON(HAS_DDI(dev))) 1199 return; 1200 1201 if (HAS_PCH_SPLIT(dev)) { 1202 u32 port_sel; 1203 1204 pp_reg = PCH_PP_CONTROL; 1205 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; 1206 1207 if (port_sel == PANEL_PORT_SELECT_LVDS && 1208 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1209 panel_pipe = PIPE_B; 1210 /* XXX: else fix for eDP */ 1211 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1212 /* presumably write lock depends on pipe, not port select */ 1213 pp_reg = VLV_PIPE_PP_CONTROL(pipe); 1214 panel_pipe = pipe; 1215 } else { 1216 pp_reg = PP_CONTROL; 1217 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1218 panel_pipe = PIPE_B; 1219 } 1220 1221 val = I915_READ(pp_reg); 1222 if (!(val & PANEL_POWER_ON) || 1223 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1224 locked = false; 1225 1226 I915_STATE_WARN(panel_pipe == pipe && locked, 1227 "panel assertion failure, pipe %c regs locked\n", 1228 pipe_name(pipe)); 1229 } 1230 1231 static void assert_cursor(struct drm_i915_private *dev_priv, 1232 enum i915_pipe pipe, bool state) 1233 { 1234 struct drm_device *dev = &dev_priv->drm; 1235 bool cur_state; 1236 1237 if (IS_845G(dev) || IS_I865G(dev)) 1238 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 1239 else 1240 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1241 1242 I915_STATE_WARN(cur_state != state, 1243 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1244 pipe_name(pipe), onoff(state), onoff(cur_state)); 1245 } 1246 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1247 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1248 1249 void assert_pipe(struct drm_i915_private *dev_priv, 1250 enum i915_pipe pipe, bool state) 1251 { 1252 bool cur_state; 1253 enum transcoder cpu_transcoder = 
intel_pipe_to_cpu_transcoder(dev_priv, 1254 pipe); 1255 enum intel_display_power_domain power_domain; 1256 1257 /* if we need the pipe quirk it must be always on */ 1258 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1259 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1260 state = true; 1261 1262 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 1263 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 1264 u32 val = I915_READ(PIPECONF(cpu_transcoder)); 1265 cur_state = !!(val & PIPECONF_ENABLE); 1266 1267 intel_display_power_put(dev_priv, power_domain); 1268 } else { 1269 cur_state = false; 1270 } 1271 1272 I915_STATE_WARN(cur_state != state, 1273 "pipe %c assertion failure (expected %s, current %s)\n", 1274 pipe_name(pipe), onoff(state), onoff(cur_state)); 1275 } 1276 1277 static void assert_plane(struct drm_i915_private *dev_priv, 1278 enum plane plane, bool state) 1279 { 1280 u32 val; 1281 bool cur_state; 1282 1283 val = I915_READ(DSPCNTR(plane)); 1284 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1285 I915_STATE_WARN(cur_state != state, 1286 "plane %c assertion failure (expected %s, current %s)\n", 1287 plane_name(plane), onoff(state), onoff(cur_state)); 1288 } 1289 1290 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1291 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1292 1293 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1294 enum i915_pipe pipe) 1295 { 1296 struct drm_device *dev = &dev_priv->drm; 1297 int i; 1298 1299 /* Primary planes are fixed to pipes on gen4+ */ 1300 if (INTEL_INFO(dev)->gen >= 4) { 1301 u32 val = I915_READ(DSPCNTR(pipe)); 1302 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1303 "plane %c assertion failure, should be disabled but not\n", 1304 plane_name(pipe)); 1305 return; 1306 } 1307 1308 /* Need to check both planes against the pipe */ 1309 for_each_pipe(dev_priv, i) { 1310 u32 val = I915_READ(DSPCNTR(i)); 1311 enum i915_pipe cur_pipe = 
(val & DISPPLANE_SEL_PIPE_MASK) >> 1312 DISPPLANE_SEL_PIPE_SHIFT; 1313 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1314 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1315 plane_name(i), pipe_name(pipe)); 1316 } 1317 } 1318 1319 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1320 enum i915_pipe pipe) 1321 { 1322 struct drm_device *dev = &dev_priv->drm; 1323 int sprite; 1324 1325 if (INTEL_INFO(dev)->gen >= 9) { 1326 for_each_sprite(dev_priv, pipe, sprite) { 1327 u32 val = I915_READ(PLANE_CTL(pipe, sprite)); 1328 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1329 "plane %d assertion failure, should be off on pipe %c but is still active\n", 1330 sprite, pipe_name(pipe)); 1331 } 1332 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1333 for_each_sprite(dev_priv, pipe, sprite) { 1334 u32 val = I915_READ(SPCNTR(pipe, sprite)); 1335 I915_STATE_WARN(val & SP_ENABLE, 1336 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1337 sprite_name(pipe, sprite), pipe_name(pipe)); 1338 } 1339 } else if (INTEL_INFO(dev)->gen >= 7) { 1340 u32 val = I915_READ(SPRCTL(pipe)); 1341 I915_STATE_WARN(val & SPRITE_ENABLE, 1342 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1343 plane_name(pipe), pipe_name(pipe)); 1344 } else if (INTEL_INFO(dev)->gen >= 5) { 1345 u32 val = I915_READ(DVSCNTR(pipe)); 1346 I915_STATE_WARN(val & DVS_ENABLE, 1347 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1348 plane_name(pipe), pipe_name(pipe)); 1349 } 1350 } 1351 1352 static void assert_vblank_disabled(struct drm_crtc *crtc) 1353 { 1354 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1355 drm_crtc_vblank_put(crtc); 1356 } 1357 1358 void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1359 enum i915_pipe pipe) 1360 { 1361 u32 val; 1362 bool enabled; 1363 1364 val = I915_READ(PCH_TRANSCONF(pipe)); 1365 enabled = !!(val & 
TRANS_ENABLE); 1366 I915_STATE_WARN(enabled, 1367 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1368 pipe_name(pipe)); 1369 } 1370 1371 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1372 enum i915_pipe pipe, u32 port_sel, u32 val) 1373 { 1374 if ((val & DP_PORT_EN) == 0) 1375 return false; 1376 1377 if (HAS_PCH_CPT(dev_priv)) { 1378 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe)); 1379 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1380 return false; 1381 } else if (IS_CHERRYVIEW(dev_priv)) { 1382 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1383 return false; 1384 } else { 1385 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1386 return false; 1387 } 1388 return true; 1389 } 1390 1391 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1392 enum i915_pipe pipe, u32 val) 1393 { 1394 if ((val & SDVO_ENABLE) == 0) 1395 return false; 1396 1397 if (HAS_PCH_CPT(dev_priv)) { 1398 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1399 return false; 1400 } else if (IS_CHERRYVIEW(dev_priv)) { 1401 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1402 return false; 1403 } else { 1404 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1405 return false; 1406 } 1407 return true; 1408 } 1409 1410 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1411 enum i915_pipe pipe, u32 val) 1412 { 1413 if ((val & LVDS_PORT_EN) == 0) 1414 return false; 1415 1416 if (HAS_PCH_CPT(dev_priv)) { 1417 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1418 return false; 1419 } else { 1420 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1421 return false; 1422 } 1423 return true; 1424 } 1425 1426 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1427 enum i915_pipe pipe, u32 val) 1428 { 1429 if ((val & ADPA_DAC_ENABLE) == 0) 1430 return false; 1431 if (HAS_PCH_CPT(dev_priv)) { 1432 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1433 
return false; 1434 } else { 1435 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1436 return false; 1437 } 1438 return true; 1439 } 1440 1441 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1442 enum i915_pipe pipe, i915_reg_t reg, 1443 u32 port_sel) 1444 { 1445 u32 val = I915_READ(reg); 1446 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1447 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1448 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1449 1450 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0 1451 && (val & DP_PIPEB_SELECT), 1452 "IBX PCH dp port still using transcoder B\n"); 1453 } 1454 1455 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1456 enum i915_pipe pipe, i915_reg_t reg) 1457 { 1458 u32 val = I915_READ(reg); 1459 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val), 1460 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1461 i915_mmio_reg_offset(reg), pipe_name(pipe)); 1462 1463 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0 1464 && (val & SDVO_PIPE_B_SELECT), 1465 "IBX PCH hdmi port still using transcoder B\n"); 1466 } 1467 1468 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1469 enum i915_pipe pipe) 1470 { 1471 u32 val; 1472 1473 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1474 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1475 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1476 1477 val = I915_READ(PCH_ADPA); 1478 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1479 "PCH VGA enabled on transcoder %c, should be disabled\n", 1480 pipe_name(pipe)); 1481 1482 val = I915_READ(PCH_LVDS); 1483 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1484 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1485 pipe_name(pipe)); 1486 1487 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1488 
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1489 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1490 } 1491 1492 static void _vlv_enable_pll(struct intel_crtc *crtc, 1493 const struct intel_crtc_state *pipe_config) 1494 { 1495 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1496 enum i915_pipe pipe = crtc->pipe; 1497 1498 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1499 POSTING_READ(DPLL(pipe)); 1500 udelay(150); 1501 1502 if (intel_wait_for_register(dev_priv, 1503 DPLL(pipe), 1504 DPLL_LOCK_VLV, 1505 DPLL_LOCK_VLV, 1506 1)) 1507 DRM_ERROR("DPLL %d failed to lock\n", pipe); 1508 } 1509 1510 static void vlv_enable_pll(struct intel_crtc *crtc, 1511 const struct intel_crtc_state *pipe_config) 1512 { 1513 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1514 enum i915_pipe pipe = crtc->pipe; 1515 1516 assert_pipe_disabled(dev_priv, pipe); 1517 1518 /* PLL is protected by panel, make sure we can write it */ 1519 assert_panel_unlocked(dev_priv, pipe); 1520 1521 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1522 _vlv_enable_pll(crtc, pipe_config); 1523 1524 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1525 POSTING_READ(DPLL_MD(pipe)); 1526 } 1527 1528 1529 static void _chv_enable_pll(struct intel_crtc *crtc, 1530 const struct intel_crtc_state *pipe_config) 1531 { 1532 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1533 enum i915_pipe pipe = crtc->pipe; 1534 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1535 u32 tmp; 1536 1537 mutex_lock(&dev_priv->sb_lock); 1538 1539 /* Enable back the 10bit clock to display controller */ 1540 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1541 tmp |= DPIO_DCLKP_EN; 1542 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1543 1544 mutex_unlock(&dev_priv->sb_lock); 1545 1546 /* 1547 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
1548 */ 1549 udelay(1); 1550 1551 /* Enable PLL */ 1552 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1553 1554 /* Check PLL is locked */ 1555 if (intel_wait_for_register(dev_priv, 1556 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV, 1557 1)) 1558 DRM_ERROR("PLL %d failed to lock\n", pipe); 1559 } 1560 1561 static void chv_enable_pll(struct intel_crtc *crtc, 1562 const struct intel_crtc_state *pipe_config) 1563 { 1564 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1565 enum i915_pipe pipe = crtc->pipe; 1566 1567 assert_pipe_disabled(dev_priv, pipe); 1568 1569 /* PLL is protected by panel, make sure we can write it */ 1570 assert_panel_unlocked(dev_priv, pipe); 1571 1572 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) 1573 _chv_enable_pll(crtc, pipe_config); 1574 1575 if (pipe != PIPE_A) { 1576 /* 1577 * WaPixelRepeatModeFixForC0:chv 1578 * 1579 * DPLLCMD is AWOL. Use chicken bits to propagate 1580 * the value from DPLLBMD to either pipe B or C. 1581 */ 1582 I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C); 1583 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md); 1584 I915_WRITE(CBR4_VLV, 0); 1585 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md; 1586 1587 /* 1588 * DPLLB VGA mode also seems to cause problems. 1589 * We should always have it disabled. 
1590 */ 1591 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0); 1592 } else { 1593 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1594 POSTING_READ(DPLL_MD(pipe)); 1595 } 1596 } 1597 1598 static int intel_num_dvo_pipes(struct drm_device *dev) 1599 { 1600 struct intel_crtc *crtc; 1601 int count = 0; 1602 1603 for_each_intel_crtc(dev, crtc) { 1604 count += crtc->base.state->active && 1605 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO); 1606 } 1607 1608 return count; 1609 } 1610 1611 static void i9xx_enable_pll(struct intel_crtc *crtc) 1612 { 1613 struct drm_device *dev = crtc->base.dev; 1614 struct drm_i915_private *dev_priv = to_i915(dev); 1615 i915_reg_t reg = DPLL(crtc->pipe); 1616 u32 dpll = crtc->config->dpll_hw_state.dpll; 1617 1618 assert_pipe_disabled(dev_priv, crtc->pipe); 1619 1620 /* PLL is protected by panel, make sure we can write it */ 1621 if (IS_MOBILE(dev) && !IS_I830(dev)) 1622 assert_panel_unlocked(dev_priv, crtc->pipe); 1623 1624 /* Enable DVO 2x clock on both PLLs if necessary */ 1625 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { 1626 /* 1627 * It appears to be important that we don't enable this 1628 * for the current pipe before otherwise configuring the 1629 * PLL. No idea how this should be handled if multiple 1630 * DVO outputs are enabled simultaneosly. 1631 */ 1632 dpll |= DPLL_DVO_2X_MODE; 1633 I915_WRITE(DPLL(!crtc->pipe), 1634 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1635 } 1636 1637 /* 1638 * Apparently we need to have VGA mode enabled prior to changing 1639 * the P1/P2 dividers. Otherwise the DPLL will keep using the old 1640 * dividers, even though the register value does change. 1641 */ 1642 I915_WRITE(reg, 0); 1643 1644 I915_WRITE(reg, dpll); 1645 1646 /* Wait for the clocks to stabilize. 
*/ 1647 POSTING_READ(reg); 1648 udelay(150); 1649 1650 if (INTEL_INFO(dev)->gen >= 4) { 1651 I915_WRITE(DPLL_MD(crtc->pipe), 1652 crtc->config->dpll_hw_state.dpll_md); 1653 } else { 1654 /* The pixel multiplier can only be updated once the 1655 * DPLL is enabled and the clocks are stable. 1656 * 1657 * So write it again. 1658 */ 1659 I915_WRITE(reg, dpll); 1660 } 1661 1662 /* We do this three times for luck */ 1663 I915_WRITE(reg, dpll); 1664 POSTING_READ(reg); 1665 udelay(150); /* wait for warmup */ 1666 I915_WRITE(reg, dpll); 1667 POSTING_READ(reg); 1668 udelay(150); /* wait for warmup */ 1669 I915_WRITE(reg, dpll); 1670 POSTING_READ(reg); 1671 udelay(150); /* wait for warmup */ 1672 } 1673 1674 /** 1675 * i9xx_disable_pll - disable a PLL 1676 * @dev_priv: i915 private structure 1677 * @pipe: pipe PLL to disable 1678 * 1679 * Disable the PLL for @pipe, making sure the pipe is off first. 1680 * 1681 * Note! This is for pre-ILK only. 1682 */ 1683 static void i9xx_disable_pll(struct intel_crtc *crtc) 1684 { 1685 struct drm_device *dev = crtc->base.dev; 1686 struct drm_i915_private *dev_priv = to_i915(dev); 1687 enum i915_pipe pipe = crtc->pipe; 1688 1689 /* Disable DVO 2x clock on both PLLs if necessary */ 1690 if (IS_I830(dev) && 1691 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) && 1692 !intel_num_dvo_pipes(dev)) { 1693 I915_WRITE(DPLL(PIPE_B), 1694 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1695 I915_WRITE(DPLL(PIPE_A), 1696 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1697 } 1698 1699 /* Don't disable pipe or pipe PLLs if needed */ 1700 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1701 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1702 return; 1703 1704 /* Make sure the pipe isn't still relying on us */ 1705 assert_pipe_disabled(dev_priv, pipe); 1706 1707 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS); 1708 POSTING_READ(DPLL(pipe)); 1709 } 1710 1711 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe 
pipe) 1712 { 1713 u32 val; 1714 1715 /* Make sure the pipe isn't still relying on us */ 1716 assert_pipe_disabled(dev_priv, pipe); 1717 1718 val = DPLL_INTEGRATED_REF_CLK_VLV | 1719 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1720 if (pipe != PIPE_A) 1721 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1722 1723 I915_WRITE(DPLL(pipe), val); 1724 POSTING_READ(DPLL(pipe)); 1725 } 1726 1727 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1728 { 1729 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1730 u32 val; 1731 1732 /* Make sure the pipe isn't still relying on us */ 1733 assert_pipe_disabled(dev_priv, pipe); 1734 1735 val = DPLL_SSC_REF_CLK_CHV | 1736 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1737 if (pipe != PIPE_A) 1738 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1739 1740 I915_WRITE(DPLL(pipe), val); 1741 POSTING_READ(DPLL(pipe)); 1742 1743 mutex_lock(&dev_priv->sb_lock); 1744 1745 /* Disable 10bit clock to display controller */ 1746 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1747 val &= ~DPIO_DCLKP_EN; 1748 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1749 1750 mutex_unlock(&dev_priv->sb_lock); 1751 } 1752 1753 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1754 struct intel_digital_port *dport, 1755 unsigned int expected_mask) 1756 { 1757 u32 port_mask; 1758 i915_reg_t dpll_reg; 1759 1760 switch (dport->port) { 1761 case PORT_B: 1762 port_mask = DPLL_PORTB_READY_MASK; 1763 dpll_reg = DPLL(0); 1764 break; 1765 case PORT_C: 1766 port_mask = DPLL_PORTC_READY_MASK; 1767 dpll_reg = DPLL(0); 1768 expected_mask <<= 4; 1769 break; 1770 case PORT_D: 1771 port_mask = DPLL_PORTD_READY_MASK; 1772 dpll_reg = DPIO_PHY_STATUS; 1773 break; 1774 default: 1775 BUG(); 1776 } 1777 1778 if (intel_wait_for_register(dev_priv, 1779 dpll_reg, port_mask, expected_mask, 1780 1000)) 1781 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1782 port_name(dport->port), I915_READ(dpll_reg) & port_mask, 
expected_mask); 1783 } 1784 1785 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1786 enum i915_pipe pipe) 1787 { 1788 struct drm_device *dev = &dev_priv->drm; 1789 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1791 i915_reg_t reg; 1792 uint32_t val, pipeconf_val; 1793 1794 /* Make sure PCH DPLL is enabled */ 1795 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll); 1796 1797 /* FDI must be feeding us bits for PCH ports */ 1798 assert_fdi_tx_enabled(dev_priv, pipe); 1799 assert_fdi_rx_enabled(dev_priv, pipe); 1800 1801 if (HAS_PCH_CPT(dev)) { 1802 /* Workaround: Set the timing override bit before enabling the 1803 * pch transcoder. */ 1804 reg = TRANS_CHICKEN2(pipe); 1805 val = I915_READ(reg); 1806 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1807 I915_WRITE(reg, val); 1808 } 1809 1810 reg = PCH_TRANSCONF(pipe); 1811 val = I915_READ(reg); 1812 pipeconf_val = I915_READ(PIPECONF(pipe)); 1813 1814 if (HAS_PCH_IBX(dev_priv)) { 1815 /* 1816 * Make the BPC in transcoder be consistent with 1817 * that in pipeconf reg. For HDMI we must use 8bpc 1818 * here for both 8bpc and 12bpc. 
1819 */ 1820 val &= ~PIPECONF_BPC_MASK; 1821 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI)) 1822 val |= PIPECONF_8BPC; 1823 else 1824 val |= pipeconf_val & PIPECONF_BPC_MASK; 1825 } 1826 1827 val &= ~TRANS_INTERLACE_MASK; 1828 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1829 if (HAS_PCH_IBX(dev_priv) && 1830 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO)) 1831 val |= TRANS_LEGACY_INTERLACED_ILK; 1832 else 1833 val |= TRANS_INTERLACED; 1834 else 1835 val |= TRANS_PROGRESSIVE; 1836 1837 I915_WRITE(reg, val | TRANS_ENABLE); 1838 if (intel_wait_for_register(dev_priv, 1839 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE, 1840 100)) 1841 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1842 } 1843 1844 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1845 enum transcoder cpu_transcoder) 1846 { 1847 u32 val, pipeconf_val; 1848 1849 /* FDI must be feeding us bits for PCH ports */ 1850 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 1851 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 1852 1853 /* Workaround: set timing override bit. 
*/ 1854 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1855 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1856 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1857 1858 val = TRANS_ENABLE; 1859 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 1860 1861 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 1862 PIPECONF_INTERLACED_ILK) 1863 val |= TRANS_INTERLACED; 1864 else 1865 val |= TRANS_PROGRESSIVE; 1866 1867 I915_WRITE(LPT_TRANSCONF, val); 1868 if (intel_wait_for_register(dev_priv, 1869 LPT_TRANSCONF, 1870 TRANS_STATE_ENABLE, 1871 TRANS_STATE_ENABLE, 1872 100)) 1873 DRM_ERROR("Failed to enable PCH transcoder\n"); 1874 } 1875 1876 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1877 enum i915_pipe pipe) 1878 { 1879 struct drm_device *dev = &dev_priv->drm; 1880 i915_reg_t reg; 1881 uint32_t val; 1882 1883 /* FDI relies on the transcoder */ 1884 assert_fdi_tx_disabled(dev_priv, pipe); 1885 assert_fdi_rx_disabled(dev_priv, pipe); 1886 1887 /* Ports must be off as well */ 1888 assert_pch_ports_disabled(dev_priv, pipe); 1889 1890 reg = PCH_TRANSCONF(pipe); 1891 val = I915_READ(reg); 1892 val &= ~TRANS_ENABLE; 1893 I915_WRITE(reg, val); 1894 /* wait for PCH transcoder off, transcoder state */ 1895 if (intel_wait_for_register(dev_priv, 1896 reg, TRANS_STATE_ENABLE, 0, 1897 50)) 1898 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1899 1900 if (HAS_PCH_CPT(dev)) { 1901 /* Workaround: Clear the timing override chicken bit again. 
*/ 1902 reg = TRANS_CHICKEN2(pipe); 1903 val = I915_READ(reg); 1904 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1905 I915_WRITE(reg, val); 1906 } 1907 } 1908 1909 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 1910 { 1911 u32 val; 1912 1913 val = I915_READ(LPT_TRANSCONF); 1914 val &= ~TRANS_ENABLE; 1915 I915_WRITE(LPT_TRANSCONF, val); 1916 /* wait for PCH transcoder off, transcoder state */ 1917 if (intel_wait_for_register(dev_priv, 1918 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0, 1919 50)) 1920 DRM_ERROR("Failed to disable PCH transcoder\n"); 1921 1922 /* Workaround: clear timing override bit. */ 1923 val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 1924 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 1925 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 1926 } 1927 1928 /** 1929 * intel_enable_pipe - enable a pipe, asserting requirements 1930 * @crtc: crtc responsible for the pipe 1931 * 1932 * Enable @crtc's pipe, making sure that various hardware specific requirements 1933 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 1934 */ 1935 static void intel_enable_pipe(struct intel_crtc *crtc) 1936 { 1937 struct drm_device *dev = crtc->base.dev; 1938 struct drm_i915_private *dev_priv = to_i915(dev); 1939 enum i915_pipe pipe = crtc->pipe; 1940 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1941 enum i915_pipe pch_transcoder; 1942 i915_reg_t reg; 1943 u32 val; 1944 1945 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe)); 1946 1947 assert_planes_disabled(dev_priv, pipe); 1948 assert_cursor_disabled(dev_priv, pipe); 1949 assert_sprites_disabled(dev_priv, pipe); 1950 1951 if (HAS_PCH_LPT(dev_priv)) 1952 pch_transcoder = TRANSCODER_A; 1953 else 1954 pch_transcoder = pipe; 1955 1956 /* 1957 * A pipe without a PLL won't actually be able to drive bits from 1958 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 1959 * need the check. 
1960 */ 1961 if (HAS_GMCH_DISPLAY(dev_priv)) 1962 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI)) 1963 assert_dsi_pll_enabled(dev_priv); 1964 else 1965 assert_pll_enabled(dev_priv, pipe); 1966 else { 1967 if (crtc->config->has_pch_encoder) { 1968 /* if driving the PCH, we need FDI enabled */ 1969 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); 1970 assert_fdi_tx_pll_enabled(dev_priv, 1971 (enum i915_pipe) cpu_transcoder); 1972 } 1973 /* FIXME: assert CPU port conditions for SNB+ */ 1974 } 1975 1976 reg = PIPECONF(cpu_transcoder); 1977 val = I915_READ(reg); 1978 if (val & PIPECONF_ENABLE) { 1979 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1980 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))); 1981 return; 1982 } 1983 1984 I915_WRITE(reg, val | PIPECONF_ENABLE); 1985 POSTING_READ(reg); 1986 1987 /* 1988 * Until the pipe starts DSL will read as 0, which would cause 1989 * an apparent vblank timestamp jump, which messes up also the 1990 * frame count when it's derived from the timestamps. So let's 1991 * wait for the pipe to start properly before we call 1992 * drm_crtc_vblank_on() 1993 */ 1994 if (dev->max_vblank_count == 0 && 1995 wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50)) 1996 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe)); 1997 } 1998 1999 /** 2000 * intel_disable_pipe - disable a pipe, asserting requirements 2001 * @crtc: crtc whose pipes is to be disabled 2002 * 2003 * Disable the pipe of @crtc, making sure that various hardware 2004 * specific requirements are met, if applicable, e.g. plane 2005 * disabled, panel fitter off, etc. 2006 * 2007 * Will wait until the pipe has shut down before returning. 
2008 */ 2009 static void intel_disable_pipe(struct intel_crtc *crtc) 2010 { 2011 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 2012 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2013 enum i915_pipe pipe = crtc->pipe; 2014 i915_reg_t reg; 2015 u32 val; 2016 2017 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2018 2019 /* 2020 * Make sure planes won't keep trying to pump pixels to us, 2021 * or we might hang the display. 2022 */ 2023 assert_planes_disabled(dev_priv, pipe); 2024 assert_cursor_disabled(dev_priv, pipe); 2025 assert_sprites_disabled(dev_priv, pipe); 2026 2027 reg = PIPECONF(cpu_transcoder); 2028 val = I915_READ(reg); 2029 if ((val & PIPECONF_ENABLE) == 0) 2030 return; 2031 2032 /* 2033 * Double wide has implications for planes 2034 * so best keep it disabled when not needed. 2035 */ 2036 if (crtc->config->double_wide) 2037 val &= ~PIPECONF_DOUBLE_WIDE; 2038 2039 /* Don't disable pipe or pipe PLLs if needed */ 2040 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2041 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2042 val &= ~PIPECONF_ENABLE; 2043 2044 I915_WRITE(reg, val); 2045 if ((val & PIPECONF_ENABLE) == 0) 2046 intel_wait_for_pipe_off(crtc); 2047 } 2048 2049 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 2050 { 2051 return IS_GEN2(dev_priv) ? 
	2048 : 4096;
}

/*
 * Byte width of one tile row for the given fb modifier and bytes-per-pixel.
 * Linear (MOD_NONE) "tiles" are a single pixel wide.
 */
static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
					   uint64_t fb_modifier, unsigned int cpp)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb_modifier);
		return cpp;
	}
}

/*
 * Tile height in rows: tile size divided by the tile row byte width.
 * Linear buffers are treated as 1-row "tiles".
 */
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
			       uint64_t fb_modifier, unsigned int cpp)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE)
		return 1;
	else
		return intel_tile_size(dev_priv) /
			intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
}

/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_i915_private *dev_priv,
			    unsigned int *tile_width,
			    unsigned int *tile_height,
			    uint64_t fb_modifier,
			    unsigned int cpp)
{
	unsigned int tile_width_bytes =
		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);

	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
}

/* Round a framebuffer height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_modifier)
{
	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);

	return ALIGN(height, tile_height);
}

unsigned int intel_rotation_info_size(const struct
intel_rotation_info *rot_info) 2126 { 2127 unsigned int size = 0; 2128 int i; 2129 2130 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) 2131 size += rot_info->plane[i].width * rot_info->plane[i].height; 2132 2133 return size; 2134 } 2135 2136 static void 2137 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, 2138 struct drm_framebuffer *fb, 2139 unsigned int rotation) 2140 { 2141 if (intel_rotation_90_or_270(rotation)) { 2142 *view = i915_ggtt_view_rotated; 2143 view->params.rotated = to_intel_framebuffer(fb)->rot_info; 2144 } else { 2145 *view = i915_ggtt_view_normal; 2146 } 2147 } 2148 2149 static void 2150 intel_fill_fb_info(struct drm_i915_private *dev_priv, 2151 struct drm_framebuffer *fb) 2152 { 2153 struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info; 2154 unsigned int tile_size, tile_width, tile_height, cpp; 2155 2156 tile_size = intel_tile_size(dev_priv); 2157 2158 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 2159 intel_tile_dims(dev_priv, &tile_width, &tile_height, 2160 fb->modifier[0], cpp); 2161 2162 info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp); 2163 info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height); 2164 2165 if (info->pixel_format == DRM_FORMAT_NV12) { 2166 cpp = drm_format_plane_cpp(fb->pixel_format, 1); 2167 intel_tile_dims(dev_priv, &tile_width, &tile_height, 2168 fb->modifier[1], cpp); 2169 2170 info->uv_offset = fb->offsets[1]; 2171 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp); 2172 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height); 2173 } 2174 } 2175 2176 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2177 { 2178 if (INTEL_INFO(dev_priv)->gen >= 9) 2179 return 256 * 1024; 2180 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) || 2181 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2182 return 128 * 1024; 2183 else if (INTEL_INFO(dev_priv)->gen >= 4) 2184 return 4 * 1024; 2185 else 2186 
		return 0;
}

/*
 * Required GGTT alignment for a scanout surface with the given modifier.
 * 0 means no special alignment requirement.
 */
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}

/*
 * Pin @fb's backing object into the GGTT for scanout (in the view
 * matching @rotation) and install a fence where needed.
 * Caller must hold struct_mutex.  Returns 0 or a negative error code.
 */
int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out.
Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Undo intel_pin_and_fence_fb_obj(): drop the fence (normal view only,
 * matching the pin path) and unpin from the display plane.
 * Caller must hold struct_mutex.
 */
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 *
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int tiles;

	/* Both offsets must be tile-aligned and must only move backwards. */
	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Convert the whole-tile delta into x/y pixel adjustments. */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	return new_offset;
}

/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 */
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct drm_framebuffer *fb, int plane,
			      unsigned int pitch,
			      unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	uint64_t fb_modifier = fb->modifier[plane];
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned, alignment;

	/* Turn the alignment into a mask (alignment is a power of two). */
	alignment = intel_surf_alignment(dev_priv, fb_modifier);
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (intel_rotation_90_or_270(rotation)) {
			/* Rotated view: pitch is in tile-height units. */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into a whole-tile part and an intra-tile part. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %=
tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		/* Round down to the surface alignment, pushing the rest into x/y. */
		offset_aligned = offset & ~alignment;

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Fold the sub-alignment remainder back into x/y. */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}

/*
 * Translate a pre-SKL DSPCNTR pixel format field to a DRM fourcc.
 * Unknown values deliberately fall into the XRGB8888 case.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/*
 * Translate a SKL PLANE_CTL format (plus component order / alpha flags)
 * to a DRM fourcc.  Unknown values deliberately fall into the 8888 case.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/*
 * Try to wrap the BIOS-programmed scanout memory (in stolen memory) in a
 * GEM object + framebuffer so the boot image survives driver takeover.
 * Returns false if the preallocated region can't be used.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct
drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Mirror the BIOS tiling setup onto the new object. */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	/* Swap the reference over to the new fb. */
	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if
(plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

/*
 * Attach an fb to the primary plane for the BIOS-enabled display: either
 * wrap the BIOS framebuffer, share an fb already used by another CRTC at
 * the same GGTT address, or give up and disable the plane.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-screen scanout of the found fb. */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS swizzle setup if the inherited fb is tiled. */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}

/*
 * Program the pre-ILK (gen2-4 + VLV/CHV) primary plane registers from
 * the given plane/crtc state.  fb is assumed pinned by the caller.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32
linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coords are 16.16 fixed point. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Map the DRM fourcc onto the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, fb, 0,
						  fb->pitches[0], rotation);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		/* Pre-gen4 has no surface-base register; use the linear offset. */
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (rotation == DRM_ROTATE_180) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/* Turn off the pre-SKL primary plane and clear its surface base. */
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}

/*
 * Program the ILK..BDW primary plane registers from the given
 * plane/crtc state.  fb is assumed pinned by the caller.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coords are 16.16 fixed point. */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Map the DRM fourcc onto the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Format was validated earlier; anything else is a driver bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, fb, 0,
					  fb->pitches[0], rotation);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (rotation == DRM_ROTATE_180) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate around the plane base; others need the offset. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/*
 * Stride register unit for the given modifier: 64 bytes for linear,
 * otherwise one tile row.
 */
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
			      uint64_t fb_modifier, uint32_t pixel_format)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
		return 64;
	} else {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
	}
}

/*
 * GGTT offset of @obj in the view currently used by @intel_plane.
 * plane == 1 selects the NV12 UV plane.  Returns (u32)-1 if the vma
 * is missing (the WARN fires in that case).
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state->rotation);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display registers only take a 32-bit offset. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}

/* Zero out one pipe scaler's control/position/size registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (aka.
unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

/*
 * PLANE_CTL format/order/alpha bits for a DRM fourcc.
 * Returns 0 (and warns) for unsupported formats.
 */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

/* PLANE_CTL tiling bits for an fb modifier; linear maps to 0. */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/* PLANE_CTL rotation bits for a DRM rotation value. */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case DRM_ROTATE_0:
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, thats why this swapping.
	 */
	case DRM_ROTATE_90:
		return PLANE_CTL_ROTATE_270;
	case DRM_ROTATE_180:
		return PLANE_CTL_ROTATE_180;
	case DRM_ROTATE_270:
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

/*
 * Program the SKL+ universal plane 0 (primary) registers, including the
 * pipe scaler when the state carries a scaler_id.  fb is assumed pinned.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	const struct skl_wm_values *wm = &dev_priv->wm.skl_results;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src coords are 16.16 fixed point; dst is in whole pixels. */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	/* Flush pending watermark results for this pipe before enabling. */
	if (wm->dirty_pipes & drm_crtc_mask(&intel_crtc->base))
		skl_write_plane_wm(intel_crtc, wm, 0);

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* Scaler handles placement; plane itself sits at 0,0. */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Writing PLANE_SURF arms the update. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Turn off the SKL+ primary plane and its watermarks. */
static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc =
to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * We only populate skl_results on watermark updates, and if the
	 * plane's visiblity isn't actually changing neither is its watermarks.
	 */
	if (!to_intel_plane_state(crtc->primary->state)->visible)
		skl_write_plane_wm(intel_crtc, &dev_priv->wm.skl_results, 0);

	I915_WRITE(PLANE_CTL(pipe, 0), 0);
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}

/* Complete any outstanding CS-based page flips on every pipe. */
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}

/* Reprogram every visible primary plane from its current state. */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);
	}
}

/*
 * Re-take HW display state and, if @state is non-NULL, commit it with
 * a forced modeset on every CRTC.  Returns the commit result.
 */
static int
__intel_display_resume(struct drm_device *dev,
		       struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int i, ret;

	intel_modeset_setup_hw_state(dev);
	i915_redisable_vga(dev);

	if (!state)
		return 0;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/*
		 * Force recalculation even if we
restore 3135 * current state. With fast modeset this may not result 3136 * in a modeset when the state is compatible. 3137 */ 3138 crtc_state->mode_changed = true; 3139 } 3140 3141 /* ignore any reset values/BIOS leftovers in the WM registers */ 3142 to_intel_atomic_state(state)->skip_intermediate_wm = true; 3143 3144 ret = drm_atomic_commit(state); 3145 3146 WARN_ON(ret == -EDEADLK); 3147 return ret; 3148 } 3149 3150 void intel_prepare_reset(struct drm_i915_private *dev_priv) 3151 { 3152 struct drm_device *dev = &dev_priv->drm; 3153 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3154 struct drm_atomic_state *state; 3155 int ret; 3156 3157 /* no reset support for gen2 */ 3158 if (IS_GEN2(dev_priv)) 3159 return; 3160 3161 /* 3162 * Need mode_config.mutex so that we don't 3163 * trample ongoing ->detect() and whatnot. 3164 */ 3165 mutex_lock(&dev->mode_config.mutex); 3166 drm_modeset_acquire_init(ctx, 0); 3167 while (1) { 3168 ret = drm_modeset_lock_all_ctx(dev, ctx); 3169 if (ret != -EDEADLK) 3170 break; 3171 3172 drm_modeset_backoff(ctx); 3173 } 3174 3175 /* reset doesn't touch the display, but flips might get nuked anyway, */ 3176 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 3177 return; 3178 3179 /* 3180 * Disabling the crtcs gracefully seems nicer. Also the 3181 * g33 docs say we should at least disable all the planes. 
3182 */ 3183 state = drm_atomic_helper_duplicate_state(dev, ctx); 3184 if (IS_ERR(state)) { 3185 ret = PTR_ERR(state); 3186 state = NULL; 3187 DRM_ERROR("Duplicating state failed with %i\n", ret); 3188 goto err; 3189 } 3190 3191 ret = drm_atomic_helper_disable_all(dev, ctx); 3192 if (ret) { 3193 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 3194 goto err; 3195 } 3196 3197 dev_priv->modeset_restore_state = state; 3198 state->acquire_ctx = ctx; 3199 return; 3200 3201 err: 3202 drm_atomic_state_free(state); 3203 } 3204 3205 void intel_finish_reset(struct drm_i915_private *dev_priv) 3206 { 3207 struct drm_device *dev = &dev_priv->drm; 3208 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx; 3209 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3210 int ret; 3211 3212 /* 3213 * Flips in the rings will be nuked by the reset, 3214 * so complete all pending flips so that user space 3215 * will get its events and not get stuck. 3216 */ 3217 intel_complete_page_flips(dev_priv); 3218 3219 /* no reset support for gen2 */ 3220 if (IS_GEN2(dev_priv)) 3221 return; 3222 3223 dev_priv->modeset_restore_state = NULL; 3224 3225 /* reset doesn't touch the display */ 3226 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) { 3227 /* 3228 * Flips in the rings have been nuked by the reset, 3229 * so update the base address of all primary 3230 * planes to the the last fb to make sure we're 3231 * showing the correct fb after a reset. 3232 * 3233 * FIXME: Atomic will make this obsolete since we won't schedule 3234 * CS-based flips (which might get lost in gpu resets) any more. 3235 */ 3236 intel_update_primary_planes(dev); 3237 } else { 3238 /* 3239 * The display has been reset as well, 3240 * so need a full re-initialization. 
3241 */ 3242 intel_runtime_pm_disable_interrupts(dev_priv); 3243 intel_runtime_pm_enable_interrupts(dev_priv); 3244 3245 intel_modeset_init_hw(dev); 3246 3247 spin_lock_irq(&dev_priv->irq_lock); 3248 if (dev_priv->display.hpd_irq_setup) 3249 dev_priv->display.hpd_irq_setup(dev_priv); 3250 spin_unlock_irq(&dev_priv->irq_lock); 3251 3252 ret = __intel_display_resume(dev, state); 3253 if (ret) 3254 DRM_ERROR("Restoring old state failed with %i\n", ret); 3255 3256 intel_hpd_init(dev_priv); 3257 } 3258 3259 drm_modeset_drop_locks(ctx); 3260 drm_modeset_acquire_fini(ctx); 3261 mutex_unlock(&dev->mode_config.mutex); 3262 } 3263 3264 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3265 { 3266 struct drm_device *dev = crtc->dev; 3267 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3268 unsigned reset_counter; 3269 bool pending; 3270 3271 reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error); 3272 if (intel_crtc->reset_counter != reset_counter) 3273 return false; 3274 3275 spin_lock_irq(&dev->event_lock); 3276 pending = to_intel_crtc(crtc)->flip_work != NULL; 3277 spin_unlock_irq(&dev->event_lock); 3278 3279 return pending; 3280 } 3281 3282 static void intel_update_pipe_config(struct intel_crtc *crtc, 3283 struct intel_crtc_state *old_crtc_state) 3284 { 3285 struct drm_device *dev = crtc->base.dev; 3286 struct drm_i915_private *dev_priv = to_i915(dev); 3287 struct intel_crtc_state *pipe_config = 3288 to_intel_crtc_state(crtc->base.state); 3289 3290 /* drm_atomic_helper_update_legacy_modeset_state might not be called. 
*/ 3291 crtc->base.mode = crtc->base.state->mode; 3292 3293 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n", 3294 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h, 3295 pipe_config->pipe_src_w, pipe_config->pipe_src_h); 3296 3297 /* 3298 * Update pipe size and adjust fitter if needed: the reason for this is 3299 * that in compute_mode_changes we check the native mode (not the pfit 3300 * mode) to see if we can flip rather than do a full mode set. In the 3301 * fastboot case, we'll flip, but if we don't update the pipesrc and 3302 * pfit state, we'll end up with a big fb scanned out into the wrong 3303 * sized surface. 3304 */ 3305 3306 I915_WRITE(PIPESRC(crtc->pipe), 3307 ((pipe_config->pipe_src_w - 1) << 16) | 3308 (pipe_config->pipe_src_h - 1)); 3309 3310 /* on skylake this is done by detaching scalers */ 3311 if (INTEL_INFO(dev)->gen >= 9) { 3312 skl_detach_scalers(crtc); 3313 3314 if (pipe_config->pch_pfit.enabled) 3315 skylake_pfit_enable(crtc); 3316 } else if (HAS_PCH_SPLIT(dev)) { 3317 if (pipe_config->pch_pfit.enabled) 3318 ironlake_pfit_enable(crtc); 3319 else if (old_crtc_state->pch_pfit.enabled) 3320 ironlake_pfit_disable(crtc, true); 3321 } 3322 } 3323 3324 static void intel_fdi_normal_train(struct drm_crtc *crtc) 3325 { 3326 struct drm_device *dev = crtc->dev; 3327 struct drm_i915_private *dev_priv = to_i915(dev); 3328 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3329 int pipe = intel_crtc->pipe; 3330 i915_reg_t reg; 3331 u32 temp; 3332 3333 /* enable normal train */ 3334 reg = FDI_TX_CTL(pipe); 3335 temp = I915_READ(reg); 3336 if (IS_IVYBRIDGE(dev)) { 3337 temp &= ~FDI_LINK_TRAIN_NONE_IVB; 3338 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; 3339 } else { 3340 temp &= ~FDI_LINK_TRAIN_NONE; 3341 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; 3342 } 3343 I915_WRITE(reg, temp); 3344 3345 reg = FDI_RX_CTL(pipe); 3346 temp = I915_READ(reg); 3347 if (HAS_PCH_CPT(dev)) { 3348 temp &= 
~FDI_LINK_TRAIN_PATTERN_MASK_CPT; 3349 temp |= FDI_LINK_TRAIN_NORMAL_CPT; 3350 } else { 3351 temp &= ~FDI_LINK_TRAIN_NONE; 3352 temp |= FDI_LINK_TRAIN_NONE; 3353 } 3354 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); 3355 3356 /* wait one idle pattern time */ 3357 POSTING_READ(reg); 3358 udelay(1000); 3359 3360 /* IVB wants error correction enabled */ 3361 if (IS_IVYBRIDGE(dev)) 3362 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | 3363 FDI_FE_ERRC_ENABLE); 3364 } 3365 3366 /* The FDI link training functions for ILK/Ibexpeak. */ 3367 static void ironlake_fdi_link_train(struct drm_crtc *crtc) 3368 { 3369 struct drm_device *dev = crtc->dev; 3370 struct drm_i915_private *dev_priv = to_i915(dev); 3371 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3372 int pipe = intel_crtc->pipe; 3373 i915_reg_t reg; 3374 u32 temp, tries; 3375 3376 /* FDI needs bits from pipe first */ 3377 assert_pipe_enabled(dev_priv, pipe); 3378 3379 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit 3380 for train result */ 3381 reg = FDI_RX_IMR(pipe); 3382 temp = I915_READ(reg); 3383 temp &= ~FDI_RX_SYMBOL_LOCK; 3384 temp &= ~FDI_RX_BIT_LOCK; 3385 I915_WRITE(reg, temp); 3386 I915_READ(reg); 3387 udelay(150); 3388 3389 /* enable CPU FDI TX and PCH FDI RX */ 3390 reg = FDI_TX_CTL(pipe); 3391 temp = I915_READ(reg); 3392 temp &= ~FDI_DP_PORT_WIDTH_MASK; 3393 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 3394 temp &= ~FDI_LINK_TRAIN_NONE; 3395 temp |= FDI_LINK_TRAIN_PATTERN_1; 3396 I915_WRITE(reg, temp | FDI_TX_ENABLE); 3397 3398 reg = FDI_RX_CTL(pipe); 3399 temp = I915_READ(reg); 3400 temp &= ~FDI_LINK_TRAIN_NONE; 3401 temp |= FDI_LINK_TRAIN_PATTERN_1; 3402 I915_WRITE(reg, temp | FDI_RX_ENABLE); 3403 3404 POSTING_READ(reg); 3405 udelay(150); 3406 3407 /* Ironlake workaround, enable clock pointer after FDI enable*/ 3408 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); 3409 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | 3410 
FDI_RX_PHASE_SYNC_POINTER_EN); 3411 3412 reg = FDI_RX_IIR(pipe); 3413 for (tries = 0; tries < 5; tries++) { 3414 temp = I915_READ(reg); 3415 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3416 3417 if ((temp & FDI_RX_BIT_LOCK)) { 3418 DRM_DEBUG_KMS("FDI train 1 done.\n"); 3419 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); 3420 break; 3421 } 3422 } 3423 if (tries == 5) 3424 DRM_ERROR("FDI train 1 fail!\n"); 3425 3426 /* Train 2 */ 3427 reg = FDI_TX_CTL(pipe); 3428 temp = I915_READ(reg); 3429 temp &= ~FDI_LINK_TRAIN_NONE; 3430 temp |= FDI_LINK_TRAIN_PATTERN_2; 3431 I915_WRITE(reg, temp); 3432 3433 reg = FDI_RX_CTL(pipe); 3434 temp = I915_READ(reg); 3435 temp &= ~FDI_LINK_TRAIN_NONE; 3436 temp |= FDI_LINK_TRAIN_PATTERN_2; 3437 I915_WRITE(reg, temp); 3438 3439 POSTING_READ(reg); 3440 udelay(150); 3441 3442 reg = FDI_RX_IIR(pipe); 3443 for (tries = 0; tries < 5; tries++) { 3444 temp = I915_READ(reg); 3445 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); 3446 3447 if (temp & FDI_RX_SYMBOL_LOCK) { 3448 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); 3449 DRM_DEBUG_KMS("FDI train 2 done.\n"); 3450 break; 3451 } 3452 } 3453 if (tries == 5) 3454 DRM_ERROR("FDI train 2 fail!\n"); 3455 3456 DRM_DEBUG_KMS("FDI train done\n"); 3457 3458 } 3459 3460 static const int snb_b_fdi_train_param[] = { 3461 FDI_LINK_TRAIN_400MV_0DB_SNB_B, 3462 FDI_LINK_TRAIN_400MV_6DB_SNB_B, 3463 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, 3464 FDI_LINK_TRAIN_800MV_0DB_SNB_B, 3465 }; 3466 3467 /* The FDI link training functions for SNB/Cougarpoint. 
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* step through the vswing/pre-emphasis table until bit lock is seen */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* same vswing sweep, this time waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read in case lock arrived just after the first read */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and CPU FDI TX PLL, with required warmup delays. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC into the FDI RX control bits at 18:16 */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Reverse of ironlake_fdi_pll_enable(): back to Rawclk, then disable PLLs. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

/* Disable the FDI link (TX then RX) and put it back into train pattern 1. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/*
 * Return true if any crtc has framebuffer unpin work outstanding; if the
 * first such crtc also has a flip_work pending, wait one vblank first to
 * give it a chance to complete.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/*
 * Finish a page flip: send the userspace event (if any), release the
 * vblank reference, wake flip waiters and queue the unpin work.
 * Caller context: called with the flip_work to retire still attached;
 * presumably under dev->event_lock (callers in this file hold it).
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/*
 * Wait (interruptibly, up to 60s) for the crtc's pending flip to complete;
 * on timeout, forcibly complete a stuck CS flip. Returns 0 or -ERESTARTSYS.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

/* Gate the iCLKIP pixel clock and disable the SSC modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock in in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Read back the currently-programmed iCLKIP frequency (in KHz, the inverse
 * of lpt_program_iclkip()); returns 0 when the clock is gated or disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}

static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, 4046 enum i915_pipe pch_transcoder) 4047 { 4048 struct drm_device *dev = crtc->base.dev; 4049 struct drm_i915_private *dev_priv = to_i915(dev); 4050 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 4051 4052 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 4053 I915_READ(HTOTAL(cpu_transcoder))); 4054 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder), 4055 I915_READ(HBLANK(cpu_transcoder))); 4056 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder), 4057 I915_READ(HSYNC(cpu_transcoder))); 4058 4059 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder), 4060 I915_READ(VTOTAL(cpu_transcoder))); 4061 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder), 4062 I915_READ(VBLANK(cpu_transcoder))); 4063 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder), 4064 I915_READ(VSYNC(cpu_transcoder))); 4065 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder), 4066 I915_READ(VSYNCSHIFT(cpu_transcoder))); 4067 } 4068 4069 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 4070 { 4071 struct drm_i915_private *dev_priv = to_i915(dev); 4072 uint32_t temp; 4073 4074 temp = I915_READ(SOUTH_CHICKEN1); 4075 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) 4076 return; 4077 4078 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); 4079 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); 4080 4081 temp &= ~FDI_BC_BIFURCATION_SELECT; 4082 if (enable) 4083 temp |= FDI_BC_BIFURCATION_SELECT; 4084 4085 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? 
"en" : "dis"); 4086 I915_WRITE(SOUTH_CHICKEN1, temp); 4087 POSTING_READ(SOUTH_CHICKEN1); 4088 } 4089 4090 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) 4091 { 4092 struct drm_device *dev = intel_crtc->base.dev; 4093 4094 switch (intel_crtc->pipe) { 4095 case PIPE_A: 4096 break; 4097 case PIPE_B: 4098 if (intel_crtc->config->fdi_lanes > 2) 4099 cpt_set_fdi_bc_bifurcation(dev, false); 4100 else 4101 cpt_set_fdi_bc_bifurcation(dev, true); 4102 4103 break; 4104 case PIPE_C: 4105 cpt_set_fdi_bc_bifurcation(dev, true); 4106 4107 break; 4108 default: 4109 BUG(); 4110 } 4111 } 4112 4113 /* Return which DP Port should be selected for Transcoder DP control */ 4114 static enum port 4115 intel_trans_dp_port_sel(struct drm_crtc *crtc) 4116 { 4117 struct drm_device *dev = crtc->dev; 4118 struct intel_encoder *encoder; 4119 4120 for_each_encoder_on_crtc(dev, crtc, encoder) { 4121 if (encoder->type == INTEL_OUTPUT_DP || 4122 encoder->type == INTEL_OUTPUT_EDP) 4123 return enc_to_dig_port(&encoder->base)->port; 4124 } 4125 4126 return -1; 4127 } 4128 4129 /* 4130 * Enable PCH resources required for PCH ports: 4131 * - PCH PLLs 4132 * - FDI training & RX/TX 4133 * - update transcoder timings 4134 * - DP transcoding bits 4135 * - transcoder 4136 */ 4137 static void ironlake_pch_enable(struct drm_crtc *crtc) 4138 { 4139 struct drm_device *dev = crtc->dev; 4140 struct drm_i915_private *dev_priv = to_i915(dev); 4141 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4142 int pipe = intel_crtc->pipe; 4143 u32 temp; 4144 4145 assert_pch_transcoder_disabled(dev_priv, pipe); 4146 4147 if (IS_IVYBRIDGE(dev)) 4148 ivybridge_update_fdi_bc_bifurcation(intel_crtc); 4149 4150 /* Write the TU size bits before fdi link training, so that error 4151 * detection works. 
*/ 4152 I915_WRITE(FDI_RX_TUSIZE1(pipe), 4153 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); 4154 4155 /* For PCH output, training FDI link */ 4156 dev_priv->display.fdi_link_train(crtc); 4157 4158 /* We need to program the right clock selection before writing the pixel 4159 * mutliplier into the DPLL. */ 4160 if (HAS_PCH_CPT(dev)) { 4161 u32 sel; 4162 4163 temp = I915_READ(PCH_DPLL_SEL); 4164 temp |= TRANS_DPLL_ENABLE(pipe); 4165 sel = TRANS_DPLLB_SEL(pipe); 4166 if (intel_crtc->config->shared_dpll == 4167 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B)) 4168 temp |= sel; 4169 else 4170 temp &= ~sel; 4171 I915_WRITE(PCH_DPLL_SEL, temp); 4172 } 4173 4174 /* XXX: pch pll's can be enabled any time before we enable the PCH 4175 * transcoder, and we actually should do this to not upset any PCH 4176 * transcoder that already use the clock when we share it. 4177 * 4178 * Note that enable_shared_dpll tries to do the right thing, but 4179 * get_shared_dpll unconditionally resets the pll - we need that to have 4180 * the right LVDS enable sequence. 
*/ 4181 intel_enable_shared_dpll(intel_crtc); 4182 4183 /* set transcoder timing, panel must allow it */ 4184 assert_panel_unlocked(dev_priv, pipe); 4185 ironlake_pch_transcoder_set_timings(intel_crtc, pipe); 4186 4187 intel_fdi_normal_train(crtc); 4188 4189 /* For PCH DP, enable TRANS_DP_CTL */ 4190 if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) { 4191 const struct drm_display_mode *adjusted_mode = 4192 &intel_crtc->config->base.adjusted_mode; 4193 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4194 i915_reg_t reg = TRANS_DP_CTL(pipe); 4195 temp = I915_READ(reg); 4196 temp &= ~(TRANS_DP_PORT_SEL_MASK | 4197 TRANS_DP_SYNC_MASK | 4198 TRANS_DP_BPC_MASK); 4199 temp |= TRANS_DP_OUTPUT_ENABLE; 4200 temp |= bpc << 9; /* same format but at 11:9 */ 4201 4202 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 4203 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 4204 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 4205 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; 4206 4207 switch (intel_trans_dp_port_sel(crtc)) { 4208 case PORT_B: 4209 temp |= TRANS_DP_PORT_SEL_B; 4210 break; 4211 case PORT_C: 4212 temp |= TRANS_DP_PORT_SEL_C; 4213 break; 4214 case PORT_D: 4215 temp |= TRANS_DP_PORT_SEL_D; 4216 break; 4217 default: 4218 BUG(); 4219 } 4220 4221 I915_WRITE(reg, temp); 4222 } 4223 4224 ironlake_enable_pch_transcoder(dev_priv, pipe); 4225 } 4226 4227 static void lpt_pch_enable(struct drm_crtc *crtc) 4228 { 4229 struct drm_device *dev = crtc->dev; 4230 struct drm_i915_private *dev_priv = to_i915(dev); 4231 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4232 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4233 4234 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A); 4235 4236 lpt_program_iclkip(crtc); 4237 4238 /* Set transcoder timing. 
 */
    ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

    lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

/*
 * Sanity check after a modeset on CPT: the pipe's scanline counter
 * (PIPEDSL) must be advancing; if it stays frozen the pipe is stuck.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
    i915_reg_t dslreg = PIPEDSL(pipe);
    u32 temp;

    temp = I915_READ(dslreg);
    udelay(500);
    if (wait_for(I915_READ(dslreg) != temp, 5)) {
        /* retry once before declaring the pipe stuck */
        if (wait_for(I915_READ(dslreg) != temp, 5))
            DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
    }
}

/*
 * Stage a scaler request (or release) for one scaler user in crtc_state.
 * Only software state is updated here; the actual scaler registers are
 * written later during plane/panel-fitter programming.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
          unsigned scaler_user, int *scaler_id, unsigned int rotation,
          int src_w, int src_h, int dst_w, int dst_h)
{
    struct intel_crtc_scaler_state *scaler_state =
        &crtc_state->scaler_state;
    struct intel_crtc *intel_crtc =
        to_intel_crtc(crtc_state->base.crtc);
    int need_scaling;

    /* With 90/270 rotation the source is compared against swapped dst axes. */
    need_scaling = intel_rotation_90_or_270(rotation) ?
        (src_h != dst_w || src_w != dst_h):
        (src_w != dst_w || src_h != dst_h);

    /*
     * if plane is being disabled or scaler is no more required or force detach
     *  - free scaler bound to this plane/crtc
     *  - in order to do this, update crtc->scaler_usage
     *
     * Here scaler state in crtc_state is set free so that
     * scaler can be assigned to other user. Actual register
     * update to free the scaler is done in plane/panel-fit programming.
     * For this purpose crtc/plane_state->scaler_id isn't reset here.
     */
    if (force_detach || !need_scaling) {
        if (*scaler_id >= 0) {
            scaler_state->scaler_users &= ~(1 << scaler_user);
            scaler_state->scalers[*scaler_id].in_use = 0;

            DRM_DEBUG_KMS("scaler_user index %u.%u: "
                "Staged freeing scaler id %d scaler_users = 0x%x\n",
                intel_crtc->pipe, scaler_user, *scaler_id,
                scaler_state->scaler_users);
            *scaler_id = -1;
        }
        return 0;
    }

    /* range checks */
    if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
        dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

        src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
        dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
        DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
            "size is out of scaler range\n",
            intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
        return -EINVAL;
    }

    /* mark this plane as a scaler user in crtc_state */
    scaler_state->scaler_users |= (1 << scaler_user);
    DRM_DEBUG_KMS("scaler_user index %u.%u: "
        "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
        intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
        scaler_state->scaler_users);

    return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4321 * 4322 * @state: crtc's scaler state 4323 * 4324 * Return 4325 * 0 - scaler_usage updated successfully 4326 * error - requested scaling cannot be supported or other error condition 4327 */ 4328 int skl_update_scaler_crtc(struct intel_crtc_state *state) 4329 { 4330 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4331 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4332 4333 DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n", 4334 intel_crtc->base.base.id, intel_crtc->base.name, 4335 intel_crtc->pipe, SKL_CRTC_INDEX); 4336 4337 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4338 &state->scaler_state.scaler_id, DRM_ROTATE_0, 4339 state->pipe_src_w, state->pipe_src_h, 4340 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4341 } 4342 4343 /** 4344 * skl_update_scaler_plane - Stages update to scaler state for a given plane. 4345 * 4346 * @state: crtc's scaler state 4347 * @plane_state: atomic plane state to update 4348 * 4349 * Return 4350 * 0 - scaler_usage updated successfully 4351 * error - requested scaling cannot be supported or other error condition 4352 */ 4353 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, 4354 struct intel_plane_state *plane_state) 4355 { 4356 4357 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 4358 struct intel_plane *intel_plane = 4359 to_intel_plane(plane_state->base.plane); 4360 struct drm_framebuffer *fb = plane_state->base.fb; 4361 int ret; 4362 4363 bool force_detach = !fb || !plane_state->visible; 4364 4365 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n", 4366 intel_plane->base.base.id, intel_plane->base.name, 4367 intel_crtc->pipe, drm_plane_index(&intel_plane->base)); 4368 4369 ret = skl_update_scaler(crtc_state, force_detach, 4370 drm_plane_index(&intel_plane->base), 4371 &plane_state->scaler_id, 4372 plane_state->base.rotation, 4373 
                /* src rect is 16.16 fixed point, dst is integer pixels */
                drm_rect_width(&plane_state->src) >> 16,
                drm_rect_height(&plane_state->src) >> 16,
                drm_rect_width(&plane_state->dst),
                drm_rect_height(&plane_state->dst));

    if (ret || plane_state->scaler_id < 0)
        return ret;

    /* check colorkey */
    if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
        DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
                  intel_plane->base.base.id,
                  intel_plane->base.name);
        return -EINVAL;
    }

    /* Check src format */
    switch (fb->pixel_format) {
    case DRM_FORMAT_RGB565:
    case DRM_FORMAT_XBGR8888:
    case DRM_FORMAT_XRGB8888:
    case DRM_FORMAT_ABGR8888:
    case DRM_FORMAT_ARGB8888:
    case DRM_FORMAT_XRGB2101010:
    case DRM_FORMAT_XBGR2101010:
    case DRM_FORMAT_YUYV:
    case DRM_FORMAT_YVYU:
    case DRM_FORMAT_UYVY:
    case DRM_FORMAT_VYUY:
        break;
    default:
        DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
                  intel_plane->base.base.id, intel_plane->base.name,
                  fb->base.id, fb->pixel_format);
        return -EINVAL;
    }

    return 0;
}

/* Detach every scaler on this crtc (register writes via skl_detach_scaler). */
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
    int i;

    for (i = 0; i < crtc->num_scalers; i++)
        skl_detach_scaler(crtc, i);
}

/* Program the SKL+ panel fitter using the scaler claimed in crtc state. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    int pipe = crtc->pipe;
    struct intel_crtc_scaler_state *scaler_state =
        &crtc->config->scaler_state;

    DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

    if (crtc->config->pch_pfit.enabled) {
        int id;

        if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
            DRM_ERROR("Requesting pfit without getting a scaler first\n");
            return;
        }

        id = scaler_state->scaler_id;
        I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
            PS_FILTER_MEDIUM
            | scaler_state->scalers[id].mode);
        I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
        I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

        DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
    }
}

/* Program the ILK..BDW panel fitter from the staged crtc state. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    int pipe = crtc->pipe;

    if (crtc->config->pch_pfit.enabled) {
        /* Force use of hard-coded filter coefficients
         * as some pre-programmed values are broken,
         * e.g. x201.
         */
        if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
            I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
                       PF_PIPE_SEL_IVB(pipe));
        else
            I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
        I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
        I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
    }
}

/* Enable IPS; Broadwell goes through the pcode mailbox, others via MMIO. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);

    if (!crtc->config->ips_enabled)
        return;

    /*
     * We can only enable IPS after we enable a plane and wait for a vblank
     * This function is called from post_plane_update, which is run after
     * a vblank wait.
     */

    assert_plane_enabled(dev_priv, crtc->plane);
    if (IS_BROADWELL(dev)) {
        mutex_lock(&dev_priv->rps.hw_lock);
        WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
        mutex_unlock(&dev_priv->rps.hw_lock);
        /* Quoting Art Runyan: "its not safe to expect any particular
         * value in IPS_CTL bit 31 after enabling IPS through the
         * mailbox." Moreover, the mailbox may return a bogus state,
         * so we need to just enable it and continue on.
4493 */ 4494 } else { 4495 I915_WRITE(IPS_CTL, IPS_ENABLE); 4496 /* The bit only becomes 1 in the next vblank, so this wait here 4497 * is essentially intel_wait_for_vblank. If we don't have this 4498 * and don't wait for vblanks until the end of crtc_enable, then 4499 * the HW state readout code will complain that the expected 4500 * IPS_CTL value is not the one we read. */ 4501 if (intel_wait_for_register(dev_priv, 4502 IPS_CTL, IPS_ENABLE, IPS_ENABLE, 4503 50)) 4504 DRM_ERROR("Timed out waiting for IPS enable\n"); 4505 } 4506 } 4507 4508 void hsw_disable_ips(struct intel_crtc *crtc) 4509 { 4510 struct drm_device *dev = crtc->base.dev; 4511 struct drm_i915_private *dev_priv = to_i915(dev); 4512 4513 if (!crtc->config->ips_enabled) 4514 return; 4515 4516 assert_plane_enabled(dev_priv, crtc->plane); 4517 if (IS_BROADWELL(dev)) { 4518 mutex_lock(&dev_priv->rps.hw_lock); 4519 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4520 mutex_unlock(&dev_priv->rps.hw_lock); 4521 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4522 if (intel_wait_for_register(dev_priv, 4523 IPS_CTL, IPS_ENABLE, 0, 4524 42)) 4525 DRM_ERROR("Timed out waiting for IPS disable\n"); 4526 } else { 4527 I915_WRITE(IPS_CTL, 0); 4528 POSTING_READ(IPS_CTL); 4529 } 4530 4531 /* We need to wait for a vblank before we can disable the plane. */ 4532 intel_wait_for_vblank(dev, crtc->pipe); 4533 } 4534 4535 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4536 { 4537 if (intel_crtc->overlay) { 4538 struct drm_device *dev = intel_crtc->base.dev; 4539 struct drm_i915_private *dev_priv = to_i915(dev); 4540 4541 mutex_lock(&dev->struct_mutex); 4542 dev_priv->mm.interruptible = false; 4543 (void) intel_overlay_switch_off(intel_crtc->overlay); 4544 dev_priv->mm.interruptible = true; 4545 mutex_unlock(&dev->struct_mutex); 4546 } 4547 4548 /* Let userspace switch the overlay on again. 
In most cases userspace
     * has to recompute where to put it anyway.
     */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    /*
     * FIXME IPS should be fine as long as one plane is
     * enabled, but in practice it seems to have problems
     * when going from primary only to sprite only and vice
     * versa.
     */
    hsw_enable_ips(intel_crtc);

    /*
     * Gen2 reports pipe underruns whenever all planes are disabled.
     * So don't enable underrun reporting before at least some planes
     * are enabled.
     * FIXME: Need to fix the logic to work when we turn off all planes
     * but leave the pipe running.
     */
    if (IS_GEN2(dev))
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

    /* Underruns don't always raise interrupts, so check manually.
 */
    intel_check_cpu_fifo_underruns(dev_priv);
    intel_check_pch_fifo_underruns(dev_priv);
}

/* FIXME move all this to pre_plane_update() with proper state tracking */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    /*
     * Gen2 reports pipe underruns whenever all planes are disabled.
     * So disable underrun reporting before all the planes get disabled.
     * FIXME: Need to fix the logic to work when we turn off all planes
     * but leave the pipe running.
     */
    if (IS_GEN2(dev))
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

    /*
     * FIXME IPS should be fine as long as one plane is
     * enabled, but in practice it seems to have problems
     * when going from primary only to sprite only and vice
     * versa.
     */
    hsw_disable_ips(intel_crtc);
}

/* FIXME get rid of this and use pre_plane_update */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    int pipe = intel_crtc->pipe;

    intel_pre_disable_primary(crtc);

    /*
     * Vblank time updates from the shadow to live plane control register
     * are blocked if the memory self-refresh mode is active at that
     * moment. So to make sure the plane gets truly disabled, disable
     * first the self-refresh mode. The self-refresh enable bit in turn
     * will be checked/applied by the HW only at the next frame start
     * event which is after the vblank start event, so we need to have a
     * wait-for-vblank between disabling the plane and the pipe.
     */
    if (HAS_GMCH_DISPLAY(dev)) {
        intel_set_memory_cxsr(dev_priv, false);
        dev_priv->wm.vlv.cxsr = false;
        intel_wait_for_vblank(dev, pipe);
    }
}

/* Post-commit bookkeeping: frontbuffer flip, watermarks, FBC and primary. */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
    struct drm_atomic_state *old_state = old_crtc_state->base.state;
    struct intel_crtc_state *pipe_config =
        to_intel_crtc_state(crtc->base.state);
    struct drm_device *dev = crtc->base.dev;
    struct drm_plane *primary = crtc->base.primary;
    struct drm_plane_state *old_pri_state =
        drm_atomic_get_existing_plane_state(old_state, primary);

    intel_frontbuffer_flip(dev, pipe_config->fb_bits);

    crtc->wm.cxsr_allowed = true;

    if (pipe_config->update_wm_post && pipe_config->base.active)
        intel_update_watermarks(&crtc->base);

    if (old_pri_state) {
        struct intel_plane_state *primary_state =
            to_intel_plane_state(primary->state);
        struct intel_plane_state *old_primary_state =
            to_intel_plane_state(old_pri_state);

        intel_fbc_post_update(crtc);

        /* primary went from hidden to visible (or a modeset re-enabled it) */
        if (primary_state->visible &&
            (needs_modeset(&pipe_config->base) ||
             !old_primary_state->visible))
            intel_post_enable_primary(&crtc->base);
    }
}

/* Pre-commit counterpart of intel_post_plane_update(): FBC, cxsr, watermarks. */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
    struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc_state *pipe_config =
        to_intel_crtc_state(crtc->base.state);
    struct drm_atomic_state *old_state = old_crtc_state->base.state;
    struct drm_plane *primary = crtc->base.primary;
    struct drm_plane_state *old_pri_state =
        drm_atomic_get_existing_plane_state(old_state, primary);
    bool modeset = needs_modeset(&pipe_config->base);

    if (old_pri_state) {
        struct intel_plane_state *primary_state =
            to_intel_plane_state(primary->state);
        struct intel_plane_state *old_primary_state =
            to_intel_plane_state(old_pri_state);

        intel_fbc_pre_update(crtc, pipe_config, primary_state);

        /* primary is about to be hidden (or a modeset will disable it) */
        if (old_primary_state->visible &&
            (modeset || !primary_state->visible))
            intel_pre_disable_primary(&crtc->base);
    }

    if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
        crtc->wm.cxsr_allowed = false;

        /*
         * Vblank time updates from the shadow to live plane control register
         * are blocked if the memory self-refresh mode is active at that
         * moment. So to make sure the plane gets truly disabled, disable
         * first the self-refresh mode. The self-refresh enable bit in turn
         * will be checked/applied by the HW only at the next frame start
         * event which is after the vblank start event, so we need to have a
         * wait-for-vblank between disabling the plane and the pipe.
         */
        if (old_crtc_state->base.active) {
            intel_set_memory_cxsr(dev_priv, false);
            dev_priv->wm.vlv.cxsr = false;
            intel_wait_for_vblank(dev, crtc->pipe);
        }
    }

    /*
     * IVB workaround: must disable low power watermarks for at least
     * one frame before enabling scaling. LP watermarks can be re-enabled
     * when scaling is disabled.
     *
     * WaCxSRDisabledForSpriteScaling:ivb
     */
    if (pipe_config->disable_lp_wm) {
        ilk_disable_lp_wm(dev);
        intel_wait_for_vblank(dev, crtc->pipe);
    }

    /*
     * If we're doing a modeset, we're done. No need to do any pre-vblank
     * watermark programming here.
     */
    if (needs_modeset(&pipe_config->base))
        return;

    /*
     * For platforms that support atomic watermarks, program the
     * 'intermediate' watermarks immediately.
On pre-gen9 platforms, these
     * will be the intermediate values that are safe for both pre- and
     * post- vblank; when vblank happens, the 'active' values will be set
     * to the final 'target' values and we'll do this again to get the
     * optimal watermarks.  For gen9+ platforms, the values we program here
     * will be the final target values which will get automatically latched
     * at vblank time; no further programming will be necessary.
     *
     * If a platform hasn't been transitioned to atomic watermarks yet,
     * we'll continue to update watermarks the old way, if flags tell
     * us to.
     */
    if (dev_priv->display.initial_watermarks != NULL)
        dev_priv->display.initial_watermarks(pipe_config);
    else if (pipe_config->update_wm_pre)
        intel_update_watermarks(&crtc->base);
}

/* Disable the overlay and every plane in plane_mask on this crtc. */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
    struct drm_device *dev = crtc->dev;
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct drm_plane *p;
    int pipe = intel_crtc->pipe;

    intel_crtc_dpms_overlay_disable(intel_crtc);

    drm_for_each_plane_mask(p, dev, plane_mask)
        to_intel_plane(p)->disable_plane(p, crtc);

    /*
     * FIXME: Once we grow proper nuclear flip support out of this we need
     * to compute the mask of flip planes precisely. For the time being
     * consider this a flip to a NULL plane.
4781 */ 4782 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4783 } 4784 4785 static void ironlake_crtc_enable(struct drm_crtc *crtc) 4786 { 4787 struct drm_device *dev = crtc->dev; 4788 struct drm_i915_private *dev_priv = to_i915(dev); 4789 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4790 struct intel_encoder *encoder; 4791 int pipe = intel_crtc->pipe; 4792 struct intel_crtc_state *pipe_config = 4793 to_intel_crtc_state(crtc->state); 4794 4795 if (WARN_ON(intel_crtc->active)) 4796 return; 4797 4798 /* 4799 * Sometimes spurious CPU pipe underruns happen during FDI 4800 * training, at least with VGA+HDMI cloning. Suppress them. 4801 * 4802 * On ILK we get an occasional spurious CPU pipe underruns 4803 * between eDP port A enable and vdd enable. Also PCH port 4804 * enable seems to result in the occasional CPU pipe underrun. 4805 * 4806 * Spurious PCH underruns also occur during PCH enabling. 4807 */ 4808 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv)) 4809 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 4810 if (intel_crtc->config->has_pch_encoder) 4811 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 4812 4813 if (intel_crtc->config->has_pch_encoder) 4814 intel_prepare_shared_dpll(intel_crtc); 4815 4816 if (intel_crtc_has_dp_encoder(intel_crtc->config)) 4817 intel_dp_set_m_n(intel_crtc, M1_N1); 4818 4819 intel_set_pipe_timings(intel_crtc); 4820 intel_set_pipe_src_size(intel_crtc); 4821 4822 if (intel_crtc->config->has_pch_encoder) { 4823 intel_cpu_transcoder_set_m_n(intel_crtc, 4824 &intel_crtc->config->fdi_m_n, NULL); 4825 } 4826 4827 ironlake_set_pipeconf(crtc); 4828 4829 intel_crtc->active = true; 4830 4831 for_each_encoder_on_crtc(dev, crtc, encoder) 4832 if (encoder->pre_enable) 4833 encoder->pre_enable(encoder); 4834 4835 if (intel_crtc->config->has_pch_encoder) { 4836 /* Note: FDI PLL enabling _must_ be done before we enable the 4837 * cpu pipes, hence this is separate from all the other 
fdi/pch
         * enabling. */
        ironlake_fdi_pll_enable(intel_crtc);
    } else {
        assert_fdi_tx_disabled(dev_priv, pipe);
        assert_fdi_rx_disabled(dev_priv, pipe);
    }

    ironlake_pfit_enable(intel_crtc);

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_color_load_luts(&pipe_config->base);

    if (dev_priv->display.initial_watermarks != NULL)
        dev_priv->display.initial_watermarks(intel_crtc->config);
    intel_enable_pipe(intel_crtc);

    if (intel_crtc->config->has_pch_encoder)
        ironlake_pch_enable(crtc);

    assert_vblank_disabled(crtc);
    drm_crtc_vblank_on(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->enable(encoder);

    if (HAS_PCH_CPT(dev))
        cpt_verify_modeset(dev, intel_crtc->pipe);

    /* Must wait for vblank to avoid spurious PCH FIFO underruns */
    if (intel_crtc->config->has_pch_encoder)
        intel_wait_for_vblank(dev, pipe);
    /* re-arm the underrun reporting suppressed at the top of this function */
    intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
    intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A.
 */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
    return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/* Full HSW+ (DDI) crtc enable sequence. */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe, hsw_workaround_pipe;
    enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
    struct intel_crtc_state *pipe_config =
        to_intel_crtc_state(crtc->state);

    if (WARN_ON(intel_crtc->active))
        return;

    if (intel_crtc->config->has_pch_encoder)
        intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                              false);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->pre_pll_enable)
            encoder->pre_pll_enable(encoder);

    if (intel_crtc->config->shared_dpll)
        intel_enable_shared_dpll(intel_crtc);

    if (intel_crtc_has_dp_encoder(intel_crtc->config))
        intel_dp_set_m_n(intel_crtc, M1_N1);

    /* DSI transcoders program their own timings */
    if (!transcoder_is_dsi(cpu_transcoder))
        intel_set_pipe_timings(intel_crtc);

    intel_set_pipe_src_size(intel_crtc);

    if (cpu_transcoder != TRANSCODER_EDP &&
        !transcoder_is_dsi(cpu_transcoder)) {
        I915_WRITE(PIPE_MULT(cpu_transcoder),
               intel_crtc->config->pixel_multiplier - 1);
    }

    if (intel_crtc->config->has_pch_encoder) {
        intel_cpu_transcoder_set_m_n(intel_crtc,
                         &intel_crtc->config->fdi_m_n, NULL);
    }

    if (!transcoder_is_dsi(cpu_transcoder))
        haswell_set_pipeconf(crtc);

    haswell_set_pipemisc(crtc);

    intel_color_set_csc(&pipe_config->base);

    intel_crtc->active = true;

    if (intel_crtc->config->has_pch_encoder)
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
    else
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe,
true);

    for_each_encoder_on_crtc(dev, crtc, encoder) {
        if (encoder->pre_enable)
            encoder->pre_enable(encoder);
    }

    if (intel_crtc->config->has_pch_encoder)
        dev_priv->display.fdi_link_train(crtc);

    if (!transcoder_is_dsi(cpu_transcoder))
        intel_ddi_enable_pipe_clock(intel_crtc);

    if (INTEL_INFO(dev)->gen >= 9)
        skylake_pfit_enable(intel_crtc);
    else
        ironlake_pfit_enable(intel_crtc);

    /*
     * On ILK+ LUT must be loaded before the pipe is running but with
     * clocks enabled
     */
    intel_color_load_luts(&pipe_config->base);

    intel_ddi_set_pipe_settings(crtc);
    if (!transcoder_is_dsi(cpu_transcoder))
        intel_ddi_enable_transcoder_func(crtc);

    if (dev_priv->display.initial_watermarks != NULL)
        dev_priv->display.initial_watermarks(pipe_config);
    else
        intel_update_watermarks(crtc);

    /* XXX: Do the pipe assertions at the right place for BXT DSI. */
    if (!transcoder_is_dsi(cpu_transcoder))
        intel_enable_pipe(intel_crtc);

    if (intel_crtc->config->has_pch_encoder)
        lpt_pch_enable(crtc);

    if (intel_crtc->config->dp_encoder_is_mst)
        intel_ddi_set_vc_payload_alloc(crtc, true);

    assert_vblank_disabled(crtc);
    drm_crtc_vblank_on(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder) {
        encoder->enable(encoder);
        intel_opregion_notify_encoder(encoder, true);
    }

    if (intel_crtc->config->has_pch_encoder) {
        /* two vblank waits before re-arming underrun reporting */
        intel_wait_for_vblank(dev, pipe);
        intel_wait_for_vblank(dev, pipe);
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
        intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                              true);
    }

    /* If we change the relative order between pipe/planes enabling, we need
     * to change the workaround.
 */
    hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
    if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
        intel_wait_for_vblank(dev, hsw_workaround_pipe);
        intel_wait_for_vblank(dev, hsw_workaround_pipe);
    }
}

/* Clear the ILK-style panel fitter registers (optionally only if in use). */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    int pipe = crtc->pipe;

    /* To avoid upsetting the power well on haswell only disable the pfit if
     * it's in use. The hw state code will make sure we get this right. */
    if (force || crtc->config->pch_pfit.enabled) {
        I915_WRITE(PF_CTL(pipe), 0);
        I915_WRITE(PF_WIN_POS(pipe), 0);
        I915_WRITE(PF_WIN_SZ(pipe), 0);
    }
}

/* Full ILK/SNB/IVB crtc disable sequence; mirrors ironlake_crtc_enable(). */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    int pipe = intel_crtc->pipe;

    /*
     * Sometimes spurious CPU pipe underruns happen when the
     * pipe is already disabled, but FDI RX/TX is still enabled.
     * Happens at least with VGA+HDMI cloning. Suppress them.
     */
    if (intel_crtc->config->has_pch_encoder) {
        intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
        intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
    }

    for_each_encoder_on_crtc(dev, crtc, encoder)
        encoder->disable(encoder);

    drm_crtc_vblank_off(crtc);
    assert_vblank_disabled(crtc);

    intel_disable_pipe(intel_crtc);

    ironlake_pfit_disable(intel_crtc, false);

    if (intel_crtc->config->has_pch_encoder)
        ironlake_fdi_disable(crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->post_disable)
            encoder->post_disable(encoder);

    if (intel_crtc->config->has_pch_encoder) {
        ironlake_disable_pch_transcoder(dev_priv, pipe);

        if (HAS_PCH_CPT(dev)) {
            i915_reg_t reg;
            u32 temp;

            /* disable TRANS_DP_CTL */
            reg = TRANS_DP_CTL(pipe);
            temp = I915_READ(reg);
            temp &= ~(TRANS_DP_OUTPUT_ENABLE |
                  TRANS_DP_PORT_SEL_MASK);
            temp |= TRANS_DP_PORT_SEL_NONE;
            I915_WRITE(reg, temp);

            /* disable DPLL_SEL */
            temp = I915_READ(PCH_DPLL_SEL);
            temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
            I915_WRITE(PCH_DPLL_SEL, temp);
        }

        ironlake_fdi_pll_disable(intel_crtc);
    }

    /* re-arm the underrun reporting suppressed at the top of this function */
    intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
    intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* Full HSW+ (DDI) crtc disable sequence; mirrors haswell_crtc_enable(). */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
    struct drm_device *dev = crtc->dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
    struct intel_encoder *encoder;
    enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

    if (intel_crtc->config->has_pch_encoder)
        intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                              false);

    for_each_encoder_on_crtc(dev, crtc, encoder) {
        intel_opregion_notify_encoder(encoder, false);
        encoder->disable(encoder);
    }

    drm_crtc_vblank_off(crtc);
    assert_vblank_disabled(crtc);

    /* XXX: Do the pipe assertions at the right place for BXT DSI. */
    if (!transcoder_is_dsi(cpu_transcoder))
        intel_disable_pipe(intel_crtc);

    if (intel_crtc->config->dp_encoder_is_mst)
        intel_ddi_set_vc_payload_alloc(crtc, false);

    if (!transcoder_is_dsi(cpu_transcoder))
        intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

    if (INTEL_INFO(dev)->gen >= 9)
        skylake_scaler_disable(intel_crtc);
    else
        ironlake_pfit_disable(intel_crtc, false);

    if (!transcoder_is_dsi(cpu_transcoder))
        intel_ddi_disable_pipe_clock(intel_crtc);

    for_each_encoder_on_crtc(dev, crtc, encoder)
        if (encoder->post_disable)
            encoder->post_disable(encoder);

    if (intel_crtc->config->has_pch_encoder) {
        lpt_disable_pch_transcoder(dev_priv);
        lpt_disable_iclkip(dev_priv);
        intel_ddi_fdi_disable(crtc);

        intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
                              true);
    }
}

/* Program the GMCH panel fitter; must only happen while the pipe is off. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
    struct drm_device *dev = crtc->base.dev;
    struct drm_i915_private *dev_priv = to_i915(dev);
    struct intel_crtc_state *pipe_config = crtc->config;

    if (!pipe_config->gmch_pfit.control)
        return;

    /*
     * The panel fitter should only be adjusted whilst the pipe is disabled,
     * according to register description and PRM.
     */
    WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
    assert_pipe_disabled(dev_priv, crtc->pipe);

    I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
    I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

    /* Border color in case we don't scale up to the full screen. Black by
     * default, change to something else for debugging.
     */
    I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain that feeds its lanes. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
    switch (port) {
    case PORT_A:
        return POWER_DOMAIN_PORT_DDI_A_LANES;
    case PORT_B:
        return POWER_DOMAIN_PORT_DDI_B_LANES;
    case PORT_C:
        return POWER_DOMAIN_PORT_DDI_C_LANES;
    case PORT_D:
        return POWER_DOMAIN_PORT_DDI_D_LANES;
    case PORT_E:
        return POWER_DOMAIN_PORT_DDI_E_LANES;
    default:
        MISSING_CASE(port);
        return POWER_DOMAIN_PORT_OTHER;
    }
}

/* Map a DDI port to the power domain needed for AUX channel transactions. */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
    switch (port) {
    case PORT_A:
        return POWER_DOMAIN_AUX_A;
    case PORT_B:
        return POWER_DOMAIN_AUX_B;
    case PORT_C:
        return POWER_DOMAIN_AUX_C;
    case PORT_D:
        return POWER_DOMAIN_AUX_D;
    case PORT_E:
        /* FIXME: Check VBT for actual wiring of PORT E */
        return POWER_DOMAIN_AUX_D;
    default:
        MISSING_CASE(port);
        return POWER_DOMAIN_AUX_A;
    }
}

enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
    struct drm_device *dev = intel_encoder->base.dev;
    struct intel_digital_port *intel_dig_port;

    switch (intel_encoder->type) {
    case INTEL_OUTPUT_UNKNOWN:
        /* Only DDI platforms should ever use this output type */
        WARN_ON_ONCE(!HAS_DDI(dev));
        /* fall through */
    case INTEL_OUTPUT_DP:
    case INTEL_OUTPUT_HDMI:
    case INTEL_OUTPUT_EDP:
        intel_dig_port = enc_to_dig_port(&intel_encoder->base);
        return port_to_power_domain(intel_dig_port->port);
    case INTEL_OUTPUT_DP_MST:
        intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
        return port_to_power_domain(intel_dig_port->port);
    case INTEL_OUTPUT_ANALOG:
        return POWER_DOMAIN_PORT_CRT;
    case INTEL_OUTPUT_DSI:
        return POWER_DOMAIN_PORT_DSI;
    default:
        return POWER_DOMAIN_PORT_OTHER;
5223 } 5224 } 5225 5226 enum intel_display_power_domain 5227 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder) 5228 { 5229 struct drm_device *dev = intel_encoder->base.dev; 5230 struct intel_digital_port *intel_dig_port; 5231 5232 switch (intel_encoder->type) { 5233 case INTEL_OUTPUT_UNKNOWN: 5234 case INTEL_OUTPUT_HDMI: 5235 /* 5236 * Only DDI platforms should ever use these output types. 5237 * We can get here after the HDMI detect code has already set 5238 * the type of the shared encoder. Since we can't be sure 5239 * what's the status of the given connectors, play safe and 5240 * run the DP detection too. 5241 */ 5242 WARN_ON_ONCE(!HAS_DDI(dev)); 5243 case INTEL_OUTPUT_DP: 5244 case INTEL_OUTPUT_EDP: 5245 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 5246 return port_to_aux_power_domain(intel_dig_port->port); 5247 case INTEL_OUTPUT_DP_MST: 5248 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary; 5249 return port_to_aux_power_domain(intel_dig_port->port); 5250 default: 5251 MISSING_CASE(intel_encoder->type); 5252 return POWER_DOMAIN_AUX_A; 5253 } 5254 } 5255 5256 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc, 5257 struct intel_crtc_state *crtc_state) 5258 { 5259 struct drm_device *dev = crtc->dev; 5260 struct drm_encoder *encoder; 5261 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5262 enum i915_pipe pipe = intel_crtc->pipe; 5263 unsigned long mask; 5264 enum transcoder transcoder = crtc_state->cpu_transcoder; 5265 5266 if (!crtc_state->base.active) 5267 return 0; 5268 5269 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 5270 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 5271 if (crtc_state->pch_pfit.enabled || 5272 crtc_state->pch_pfit.force_thru) 5273 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 5274 5275 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) { 5276 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 5277 5278 mask |= 
BIT(intel_display_port_power_domain(intel_encoder)); 5279 } 5280 5281 if (crtc_state->shared_dpll) 5282 mask |= BIT(POWER_DOMAIN_PLLS); 5283 5284 return mask; 5285 } 5286 5287 static unsigned long 5288 modeset_get_crtc_power_domains(struct drm_crtc *crtc, 5289 struct intel_crtc_state *crtc_state) 5290 { 5291 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 5292 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5293 enum intel_display_power_domain domain; 5294 unsigned long domains, new_domains, old_domains; 5295 5296 old_domains = intel_crtc->enabled_power_domains; 5297 intel_crtc->enabled_power_domains = new_domains = 5298 get_crtc_power_domains(crtc, crtc_state); 5299 5300 domains = new_domains & ~old_domains; 5301 5302 for_each_power_domain(domain, domains) 5303 intel_display_power_get(dev_priv, domain); 5304 5305 return old_domains & ~new_domains; 5306 } 5307 5308 static void modeset_put_power_domains(struct drm_i915_private *dev_priv, 5309 unsigned long domains) 5310 { 5311 enum intel_display_power_domain domain; 5312 5313 for_each_power_domain(domain, domains) 5314 intel_display_power_put(dev_priv, domain); 5315 } 5316 5317 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 5318 { 5319 int max_cdclk_freq = dev_priv->max_cdclk_freq; 5320 5321 if (INTEL_INFO(dev_priv)->gen >= 9 || 5322 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 5323 return max_cdclk_freq; 5324 else if (IS_CHERRYVIEW(dev_priv)) 5325 return max_cdclk_freq*95/100; 5326 else if (INTEL_INFO(dev_priv)->gen < 4) 5327 return 2*max_cdclk_freq*90/100; 5328 else 5329 return max_cdclk_freq*90/100; 5330 } 5331 5332 static int skl_calc_cdclk(int max_pixclk, int vco); 5333 5334 static void intel_update_max_cdclk(struct drm_device *dev) 5335 { 5336 struct drm_i915_private *dev_priv = to_i915(dev); 5337 5338 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5339 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5340 int max_cdclk, vco; 5341 5342 vco = 
		      dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev)) {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}

/*
 * Re-read the current cdclk from the hardware and cache it.  On VLV/CHV
 * this also keeps the GMBus clock divider in sync with the new cdclk.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}

/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}

/*
 * Return the DE PLL vco needed to produce @cdclk, or 0 when the PLL can
 * stay off (cdclk equal to the reference clock).
 */
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through - use the 60x ratio as a safe default */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}

/* Disable the BXT DE PLL and wait for it to report unlocked. */
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}

/* Program the BXT DE PLL ratio for @vco, enable it and wait for lock. */
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
	u32 val;

	val = I915_READ(BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	I915_WRITE(BXT_DE_PLL_CTL, val);

	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE,
				    BXT_DE_PLL_LOCK,
				    BXT_DE_PLL_LOCK,
				    1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk_pll.vco = vco;
}

/*
 * Change the cdclk on Broxton: notify the power controller of the
 * upcoming change, retune the DE PLL if the required vco differs,
 * program CDCLK_CTL, then report the new frequency back to the power
 * controller.
 */
static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
{
	u32 val, divider;
	int vco, ret;

	vco = bxt_de_pll_vco(dev_priv, cdclk);

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 3:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	default:
		/* Only expected for PLL bypass (cdclk == ref, vco == 0). */
		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
		WARN_ON(vco != 0);

		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	}

	/* Inform power controller of upcoming frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	/* Retune the PLL only when the target vco actually differs. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_enable(dev_priv, vco);

	val = divider | skl_cdclk_decimal(cdclk);
	/*
	 * FIXME if only the cd2x divider needs changing, it could be done
	 * without shutting off the pipe (if only one pipe is active).
	 */
	val |= BXT_CDCLK_CD2X_PIPE_NONE;
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	intel_update_cdclk(&dev_priv->drm);
}

/*
 * Verify the cdclk state left by the BIOS and, when it is missing or
 * inconsistent, force a full reprogram (PLL disable + enable) on the
 * next bxt_set_cdclk().
 */
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	intel_update_cdclk(&dev_priv->drm);

	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
		   skl_cdclk_decimal(dev_priv->cdclk_freq);
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (dev_priv->cdclk_freq >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}

/* Boot-time cdclk setup for Broxton; keeps a sane BIOS configuration. */
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
		return;

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 */
	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
}

/* Drop the cdclk back to the PLL reference clock (DE PLL bypassed). */
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
}

/*
 * Pick the smallest legal SKL cdclk able to carry @max_pixclk for the
 * given DPLL0 @vco (the 8640 MHz and 8100 MHz vcos yield different
 * cdclk sets).
 */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000)
			return 617143;
		else if (max_pixclk > 432000)
			return 540000;
		else if (max_pixclk > 308571)
			return 432000;
		else
			return 308571;
	} else {
		if (max_pixclk > 540000)
			return 675000;
		else if (max_pixclk > 450000)
			return 540000;
		else if (max_pixclk > 337500)
			return 450000;
		else
			return 337500;
	}
}

/*
 * Read back the DPLL0 hardware state and derive cdclk_pll.ref/.vco from
 * it (vco stays 0 when the PLL is off or in an unexpected mode).
 */
static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	/* The programmed link rate implies which vco DPLL0 is running at. */
	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}

/* Record the preferred DPLL0 vco; refresh the max cdclk if it changed. */
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(&dev_priv->drm);
}

/*
 * Enable DPLL0 at the given vco: select the minimum cdclk first, program
 * the DPLL0 link rate that yields @vco, then enable the PLL and wait for
 * lock.
 */
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk = skl_calc_cdclk(0, vco);
	u32 val;

	WARN_ON(vco != 8100000 && vco != 8640000);

	/* select the minimum CDCLK before enabling DPLL 0 */
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk_pll.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}

/* Disable DPLL0 and wait for it to report unlocked. */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
				    1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	dev_priv->cdclk_pll.vco = 0;
}

/* One-shot query: is the PCU ready for a cdclk change? */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}

/* Poll skl_cdclk_pcu_ready() until it succeeds or the wait times out. */
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
	return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
}

/*
 * Change the cdclk on Skylake/Kabylake: wait for PCU permission, switch
 * DPLL0 to the required vco if it differs, program CDCLK_CTL, then ack
 * the PCU with the matching frequency code.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
	struct drm_device *dev = &dev_priv->drm;
	u32 freq_select, pcu_ack;

	/* cdclk equals the 24 MHz reference exactly when DPLL0 is bypassed */
	WARN_ON((cdclk == 24000) != (vco == 0));

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch (cdclk) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308571:
	case 337500:
	default:
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	/* Retune DPLL0 only when the target vco actually differs. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}

static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);

/* Drop the cdclk back to the DPLL0 reference clock (DPLL0 off). */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}

/* Boot-time cdclk setup for Skylake/Kabylake. */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	int cdclk, vco;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk_pll.vco);
		return;
	}

	vco = dev_priv->skl_preferred_vco_freq;
	if (vco == 0)
		vco = 8100000;
	cdclk = skl_calc_cdclk(0, vco);

	skl_set_cdclk(dev_priv, cdclk, vco);
}

/*
 * Verify the cdclk state left by the BIOS and, when it is missing or
 * inconsistent, force a full reprogram (PLL disable + enable) on the
 * next skl_set_cdclk().
 */
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t cdctl, expected;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(&dev_priv->drm);
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		   skl_cdclk_decimal(dev_priv->cdclk_freq);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new voltage point from the Punit and wait for ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}

/* Change the cdclk on Cherryview via the Punit CCK divider. */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}

/* Pick the smallest legal VLV/CHV cdclk able to carry @max_pixclk. */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ?
		       333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* Pick the smallest legal Broxton cdclk able to carry @max_pixclk. */
static int bxt_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 576000)
		return 624000;
	else if (max_pixclk > 384000)
		return 576000;
	else if (max_pixclk > 288000)
		return 384000;
	else if (max_pixclk > 144000)
		return 288000;
	else
		return 144000;
}

/* Compute the max pixel clock for new configuration. */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum i915_pipe pipe;

	/*
	 * Start from the currently committed per-pipe pixel clocks, then
	 * overwrite the entry for every crtc contained in @state.
	 */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pixe], max_pixclk);

	return max_pixclk;
}

/*
 * Compute the cdclk for the new state on VLV/CHV; dev_cdclk drops to the
 * minimum when no crtcs will be active.
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);

	return 0;
}

/*
 * Compute the cdclk for the new state on BXT; dev_cdclk drops to the
 * minimum when no crtcs will be active.
 */
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	int max_pixclk = ilk_max_pixel_rate(state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		bxt_calc_cdclk(max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = bxt_calc_cdclk(0);

	return 0;
}

/* Reprogram the PFI credits to match the current cdclk/czclk ratio. */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}

/* Commit the cdclk computed by valleyview_modeset_calc_cdclk(). */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}

/*
 * Modeset enable sequence for a VLV/CHV crtc: timings, pipeconf, PLL,
 * encoder hooks, panel fitter, LUTs, watermarks, pipe, then vblank and
 * encoder enable.  NOTE(review): statement order appears to follow the
 * hardware enable sequence — do not reorder without consulting the PRM.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = to_i915(dev);

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (IS_CHERRYVIEW(dev)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);
	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}

/* Write the precomputed FP0/FP1 PLL divisors for the crtc's pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

/*
 * Modeset enable sequence for a gen2-4 crtc: PLL dividers, timings,
 * pipeconf, encoder pre-enable, PLL, panel fitter, LUTs, watermarks,
 * pipe, then vblank and encoder enable.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	enum i915_pipe pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc_has_dp_encoder(intel_crtc->config))
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* underrun reporting is only toggled on gen3+, never on gen2 */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}

/* Turn off the GMCH panel fitter.  The pipe must already be disabled. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/*
 * Modeset disable sequence for a gen2-4/VLV/CHV crtc: encoders, vblank,
 * pipe, panel fitter, encoder post-disable hooks, then the DPLL.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev))
		intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/*
	 * NOTE(review): the pipe PLL is left untouched for DSI —
	 * presumably the DSI encoder manages its own PLL; confirm.
	 */
	if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}

/*
 * Disable an active crtc without going through the atomic commit
 * machinery and clear all associated bookkeeping: plane visibility,
 * crtc/encoder links, FBC, watermarks, shared DPLL, power domains and
 * the cached min pixclk.  The debug message suggests this runs while
 * adjusting/sanitizing hw state.
 */
static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (!intel_crtc->active)
		return;

	if (to_intel_plane_state(crtc->primary->state)->visible) {
		WARN_ON(intel_crtc->flip_work);

		intel_pre_disable_primary_noatomic(crtc);

		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);

	DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
		      crtc->base.id, crtc->name);

	WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
	crtc->state->active = false;
	intel_crtc->active = false;
	crtc->enabled = false;
	crtc->state->connector_mask = 0;
	crtc->state->encoder_mask = 0;

	for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
		encoder->base.crtc = NULL;

	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Release every power domain reference this crtc was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
6420 */ 6421 int intel_display_suspend(struct drm_device *dev) 6422 { 6423 struct drm_i915_private *dev_priv = to_i915(dev); 6424 struct drm_atomic_state *state; 6425 int ret; 6426 6427 state = drm_atomic_helper_suspend(dev); 6428 ret = PTR_ERR_OR_ZERO(state); 6429 if (ret) 6430 DRM_ERROR("Suspending crtc's failed with %i\n", ret); 6431 else 6432 dev_priv->modeset_restore_state = state; 6433 return ret; 6434 } 6435 6436 void intel_encoder_destroy(struct drm_encoder *encoder) 6437 { 6438 struct intel_encoder *intel_encoder = to_intel_encoder(encoder); 6439 6440 drm_encoder_cleanup(encoder); 6441 kfree(intel_encoder); 6442 } 6443 6444 /* Cross check the actual hw state with our own modeset state tracking (and it's 6445 * internal consistency). */ 6446 static void intel_connector_verify_state(struct intel_connector *connector) 6447 { 6448 struct drm_crtc *crtc = connector->base.state->crtc; 6449 6450 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 6451 connector->base.base.id, 6452 connector->base.name); 6453 6454 if (connector->get_hw_state(connector)) { 6455 struct intel_encoder *encoder = connector->encoder; 6456 struct drm_connector_state *conn_state = connector->base.state; 6457 6458 I915_STATE_WARN(!crtc, 6459 "connector enabled without attached crtc\n"); 6460 6461 if (!crtc) 6462 return; 6463 6464 I915_STATE_WARN(!crtc->state->active, 6465 "connector is active, but attached crtc isn't\n"); 6466 6467 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST) 6468 return; 6469 6470 I915_STATE_WARN(conn_state->best_encoder != &encoder->base, 6471 "atomic encoder doesn't match attached encoder\n"); 6472 6473 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc, 6474 "attached encoder crtc differs from connector crtc\n"); 6475 } else { 6476 I915_STATE_WARN(crtc && crtc->state->active, 6477 "attached crtc is active, but connector isn't\n"); 6478 I915_STATE_WARN(!crtc && connector->base.state->best_encoder, 6479 "best encoder set without crtc!\n"); 6480 } 6481 } 6482 6483 int 
intel_connector_init(struct intel_connector *connector) 6484 { 6485 drm_atomic_helper_connector_reset(&connector->base); 6486 6487 if (!connector->base.state) 6488 return -ENOMEM; 6489 6490 return 0; 6491 } 6492 6493 struct intel_connector *intel_connector_alloc(void) 6494 { 6495 struct intel_connector *connector; 6496 6497 connector = kzalloc(sizeof *connector, GFP_KERNEL); 6498 if (!connector) 6499 return NULL; 6500 6501 if (intel_connector_init(connector) < 0) { 6502 kfree(connector); 6503 return NULL; 6504 } 6505 6506 return connector; 6507 } 6508 6509 /* Simple connector->get_hw_state implementation for encoders that support only 6510 * one connector and no cloning and hence the encoder state determines the state 6511 * of the connector. */ 6512 bool intel_connector_get_hw_state(struct intel_connector *connector) 6513 { 6514 enum i915_pipe pipe = 0; 6515 struct intel_encoder *encoder = connector->encoder; 6516 6517 return encoder->get_hw_state(encoder, &pipe); 6518 } 6519 6520 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state) 6521 { 6522 if (crtc_state->base.enable && crtc_state->has_pch_encoder) 6523 return crtc_state->fdi_lanes; 6524 6525 return 0; 6526 } 6527 6528 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe, 6529 struct intel_crtc_state *pipe_config) 6530 { 6531 struct drm_atomic_state *state = pipe_config->base.state; 6532 struct intel_crtc *other_crtc; 6533 struct intel_crtc_state *other_crtc_state; 6534 6535 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", 6536 pipe_name(pipe), pipe_config->fdi_lanes); 6537 if (pipe_config->fdi_lanes > 4) { 6538 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n", 6539 pipe_name(pipe), pipe_config->fdi_lanes); 6540 return -EINVAL; 6541 } 6542 6543 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 6544 if (pipe_config->fdi_lanes > 2) { 6545 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n", 6546 pipe_config->fdi_lanes); 6547 return 
-EINVAL; 6548 } else { 6549 return 0; 6550 } 6551 } 6552 6553 if (INTEL_INFO(dev)->num_pipes == 2) 6554 return 0; 6555 6556 /* Ivybridge 3 pipe is really complicated */ 6557 switch (pipe) { 6558 case PIPE_A: 6559 return 0; 6560 case PIPE_B: 6561 if (pipe_config->fdi_lanes <= 2) 6562 return 0; 6563 6564 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C)); 6565 other_crtc_state = 6566 intel_atomic_get_crtc_state(state, other_crtc); 6567 if (IS_ERR(other_crtc_state)) 6568 return PTR_ERR(other_crtc_state); 6569 6570 if (pipe_required_fdi_lanes(other_crtc_state) > 0) { 6571 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", 6572 pipe_name(pipe), pipe_config->fdi_lanes); 6573 return -EINVAL; 6574 } 6575 return 0; 6576 case PIPE_C: 6577 if (pipe_config->fdi_lanes > 2) { 6578 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6579 pipe_name(pipe), pipe_config->fdi_lanes); 6580 return -EINVAL; 6581 } 6582 6583 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B)); 6584 other_crtc_state = 6585 intel_atomic_get_crtc_state(state, other_crtc); 6586 if (IS_ERR(other_crtc_state)) 6587 return PTR_ERR(other_crtc_state); 6588 6589 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6590 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6591 return -EINVAL; 6592 } 6593 return 0; 6594 default: 6595 BUG(); 6596 } 6597 } 6598 6599 #define RETRY 1 6600 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6601 struct intel_crtc_state *pipe_config) 6602 { 6603 struct drm_device *dev = intel_crtc->base.dev; 6604 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6605 int lane, link_bw, fdi_dotclock, ret; 6606 bool needs_recompute = false; 6607 6608 retry: 6609 /* FDI is a binary signal running at ~2.7GHz, encoding 6610 * each output octet as 10 bits. 
The actual frequency 6611 * is stored as a divider into a 100MHz clock, and the 6612 * mode pixel clock is stored in units of 1KHz. 6613 * Hence the bw of each lane in terms of the mode signal 6614 * is: 6615 */ 6616 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config); 6617 6618 fdi_dotclock = adjusted_mode->crtc_clock; 6619 6620 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 6621 pipe_config->pipe_bpp); 6622 6623 pipe_config->fdi_lanes = lane; 6624 6625 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6626 link_bw, &pipe_config->fdi_m_n); 6627 6628 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 6629 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6630 pipe_config->pipe_bpp -= 2*3; 6631 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6632 pipe_config->pipe_bpp); 6633 needs_recompute = true; 6634 pipe_config->bw_constrained = true; 6635 6636 goto retry; 6637 } 6638 6639 if (needs_recompute) 6640 return RETRY; 6641 6642 return ret; 6643 } 6644 6645 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, 6646 struct intel_crtc_state *pipe_config) 6647 { 6648 if (pipe_config->pipe_bpp > 24) 6649 return false; 6650 6651 /* HSW can handle pixel rate up to cdclk? */ 6652 if (IS_HASWELL(dev_priv)) 6653 return true; 6654 6655 /* 6656 * We compare against max which means we must take 6657 * the increased cdclk requirement into account when 6658 * calculating the new cdclk. 
6659 * 6660 * Should measure whether using a lower cdclk w/o IPS 6661 */ 6662 return ilk_pipe_pixel_rate(pipe_config) <= 6663 dev_priv->max_cdclk_freq * 95 / 100; 6664 } 6665 6666 static void hsw_compute_ips_config(struct intel_crtc *crtc, 6667 struct intel_crtc_state *pipe_config) 6668 { 6669 struct drm_device *dev = crtc->base.dev; 6670 struct drm_i915_private *dev_priv = to_i915(dev); 6671 6672 pipe_config->ips_enabled = i915.enable_ips && 6673 hsw_crtc_supports_ips(crtc) && 6674 pipe_config_supports_ips(dev_priv, pipe_config); 6675 } 6676 6677 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 6678 { 6679 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6680 6681 /* GDG double wide on either pipe, otherwise pipe A only */ 6682 return INTEL_INFO(dev_priv)->gen < 4 && 6683 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6684 } 6685 6686 static int intel_crtc_compute_config(struct intel_crtc *crtc, 6687 struct intel_crtc_state *pipe_config) 6688 { 6689 struct drm_device *dev = crtc->base.dev; 6690 struct drm_i915_private *dev_priv = to_i915(dev); 6691 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6692 int clock_limit = dev_priv->max_dotclk_freq; 6693 6694 if (INTEL_INFO(dev)->gen < 4) { 6695 clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6696 6697 /* 6698 * Enable double wide mode when the dot clock 6699 * is > 90% of the (display) core speed. 
6700 */ 6701 if (intel_crtc_supports_double_wide(crtc) && 6702 adjusted_mode->crtc_clock > clock_limit) { 6703 clock_limit = dev_priv->max_dotclk_freq; 6704 pipe_config->double_wide = true; 6705 } 6706 } 6707 6708 if (adjusted_mode->crtc_clock > clock_limit) { 6709 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6710 adjusted_mode->crtc_clock, clock_limit, 6711 yesno(pipe_config->double_wide)); 6712 return -EINVAL; 6713 } 6714 6715 /* 6716 * Pipe horizontal size must be even in: 6717 * - DVO ganged mode 6718 * - LVDS dual channel mode 6719 * - Double wide pipe 6720 */ 6721 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) && 6722 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6723 pipe_config->pipe_src_w &= ~1; 6724 6725 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6726 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6727 */ 6728 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6729 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6730 return -EINVAL; 6731 6732 if (HAS_IPS(dev)) 6733 hsw_compute_ips_config(crtc, pipe_config); 6734 6735 if (pipe_config->has_pch_encoder) 6736 return ironlake_fdi_compute_config(crtc, pipe_config); 6737 6738 return 0; 6739 } 6740 6741 static int skylake_get_display_clock_speed(struct drm_device *dev) 6742 { 6743 struct drm_i915_private *dev_priv = to_i915(dev); 6744 uint32_t cdctl; 6745 6746 skl_dpll0_update(dev_priv); 6747 6748 if (dev_priv->cdclk_pll.vco == 0) 6749 return dev_priv->cdclk_pll.ref; 6750 6751 cdctl = I915_READ(CDCLK_CTL); 6752 6753 if (dev_priv->cdclk_pll.vco == 8640000) { 6754 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6755 case CDCLK_FREQ_450_432: 6756 return 432000; 6757 case CDCLK_FREQ_337_308: 6758 return 308571; 6759 case CDCLK_FREQ_540: 6760 return 540000; 6761 case CDCLK_FREQ_675_617: 6762 return 617143; 6763 default: 6764 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); 6765 } 6766 } else { 6767 switch (cdctl 
& CDCLK_FREQ_SEL_MASK) { 6768 case CDCLK_FREQ_450_432: 6769 return 450000; 6770 case CDCLK_FREQ_337_308: 6771 return 337500; 6772 case CDCLK_FREQ_540: 6773 return 540000; 6774 case CDCLK_FREQ_675_617: 6775 return 675000; 6776 default: 6777 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK); 6778 } 6779 } 6780 6781 return dev_priv->cdclk_pll.ref; 6782 } 6783 6784 static void bxt_de_pll_update(struct drm_i915_private *dev_priv) 6785 { 6786 u32 val; 6787 6788 dev_priv->cdclk_pll.ref = 19200; 6789 dev_priv->cdclk_pll.vco = 0; 6790 6791 val = I915_READ(BXT_DE_PLL_ENABLE); 6792 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) 6793 return; 6794 6795 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) 6796 return; 6797 6798 val = I915_READ(BXT_DE_PLL_CTL); 6799 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) * 6800 dev_priv->cdclk_pll.ref; 6801 } 6802 6803 static int broxton_get_display_clock_speed(struct drm_device *dev) 6804 { 6805 struct drm_i915_private *dev_priv = to_i915(dev); 6806 u32 divider; 6807 int div, vco; 6808 6809 bxt_de_pll_update(dev_priv); 6810 6811 vco = dev_priv->cdclk_pll.vco; 6812 if (vco == 0) 6813 return dev_priv->cdclk_pll.ref; 6814 6815 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; 6816 6817 switch (divider) { 6818 case BXT_CDCLK_CD2X_DIV_SEL_1: 6819 div = 2; 6820 break; 6821 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6822 div = 3; 6823 break; 6824 case BXT_CDCLK_CD2X_DIV_SEL_2: 6825 div = 4; 6826 break; 6827 case BXT_CDCLK_CD2X_DIV_SEL_4: 6828 div = 8; 6829 break; 6830 default: 6831 MISSING_CASE(divider); 6832 return dev_priv->cdclk_pll.ref; 6833 } 6834 6835 return DIV_ROUND_CLOSEST(vco, div); 6836 } 6837 6838 static int broadwell_get_display_clock_speed(struct drm_device *dev) 6839 { 6840 struct drm_i915_private *dev_priv = to_i915(dev); 6841 uint32_t lcpll = I915_READ(LCPLL_CTL); 6842 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6843 6844 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6845 return 800000; 6846 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6847 return 
450000; 6848 else if (freq == LCPLL_CLK_FREQ_450) 6849 return 450000; 6850 else if (freq == LCPLL_CLK_FREQ_54O_BDW) 6851 return 540000; 6852 else if (freq == LCPLL_CLK_FREQ_337_5_BDW) 6853 return 337500; 6854 else 6855 return 675000; 6856 } 6857 6858 static int haswell_get_display_clock_speed(struct drm_device *dev) 6859 { 6860 struct drm_i915_private *dev_priv = to_i915(dev); 6861 uint32_t lcpll = I915_READ(LCPLL_CTL); 6862 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6863 6864 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6865 return 800000; 6866 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6867 return 450000; 6868 else if (freq == LCPLL_CLK_FREQ_450) 6869 return 450000; 6870 else if (IS_HSW_ULT(dev)) 6871 return 337500; 6872 else 6873 return 540000; 6874 } 6875 6876 static int valleyview_get_display_clock_speed(struct drm_device *dev) 6877 { 6878 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk", 6879 CCK_DISPLAY_CLOCK_CONTROL); 6880 } 6881 6882 static int ilk_get_display_clock_speed(struct drm_device *dev) 6883 { 6884 return 450000; 6885 } 6886 6887 static int i945_get_display_clock_speed(struct drm_device *dev) 6888 { 6889 return 400000; 6890 } 6891 6892 static int i915_get_display_clock_speed(struct drm_device *dev) 6893 { 6894 return 333333; 6895 } 6896 6897 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 6898 { 6899 return 200000; 6900 } 6901 6902 static int pnv_get_display_clock_speed(struct drm_device *dev) 6903 { 6904 u16 gcfgc = 0; 6905 6906 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6907 6908 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6909 case GC_DISPLAY_CLOCK_267_MHZ_PNV: 6910 return 266667; 6911 case GC_DISPLAY_CLOCK_333_MHZ_PNV: 6912 return 333333; 6913 case GC_DISPLAY_CLOCK_444_MHZ_PNV: 6914 return 444444; 6915 case GC_DISPLAY_CLOCK_200_MHZ_PNV: 6916 return 200000; 6917 default: 6918 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); 6919 case GC_DISPLAY_CLOCK_133_MHZ_PNV: 6920 return 133333; 6921 case 
GC_DISPLAY_CLOCK_167_MHZ_PNV: 6922 return 166667; 6923 } 6924 } 6925 6926 static int i915gm_get_display_clock_speed(struct drm_device *dev) 6927 { 6928 u16 gcfgc = 0; 6929 6930 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6931 6932 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 6933 return 133333; 6934 else { 6935 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6936 case GC_DISPLAY_CLOCK_333_MHZ: 6937 return 333333; 6938 default: 6939 case GC_DISPLAY_CLOCK_190_200_MHZ: 6940 return 190000; 6941 } 6942 } 6943 } 6944 6945 static int i865_get_display_clock_speed(struct drm_device *dev) 6946 { 6947 return 266667; 6948 } 6949 6950 static int i85x_get_display_clock_speed(struct drm_device *dev) 6951 { 6952 u16 hpllcc = 0; 6953 6954 /* 6955 * 852GM/852GMV only supports 133 MHz and the HPLLCC 6956 * encoding is different :( 6957 * FIXME is this the right way to detect 852GM/852GMV? 6958 */ 6959 if (dev->pdev->revision == 0x1) 6960 return 133333; 6961 6962 pci_bus_read_config_word(dev->pdev->bus, 6963 PCI_DEVFN(0, 3), HPLLCC, &hpllcc); 6964 6965 /* Assume that the hardware is in the high speed state. This 6966 * should be the default. 
6967 */ 6968 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 6969 case GC_CLOCK_133_200: 6970 case GC_CLOCK_133_200_2: 6971 case GC_CLOCK_100_200: 6972 return 200000; 6973 case GC_CLOCK_166_250: 6974 return 250000; 6975 case GC_CLOCK_100_133: 6976 return 133333; 6977 case GC_CLOCK_133_266: 6978 case GC_CLOCK_133_266_2: 6979 case GC_CLOCK_166_266: 6980 return 266667; 6981 } 6982 6983 /* Shouldn't happen */ 6984 return 0; 6985 } 6986 6987 static int i830_get_display_clock_speed(struct drm_device *dev) 6988 { 6989 return 133333; 6990 } 6991 6992 static unsigned int intel_hpll_vco(struct drm_device *dev) 6993 { 6994 struct drm_i915_private *dev_priv = to_i915(dev); 6995 static const unsigned int blb_vco[8] = { 6996 [0] = 3200000, 6997 [1] = 4000000, 6998 [2] = 5333333, 6999 [3] = 4800000, 7000 [4] = 6400000, 7001 }; 7002 static const unsigned int pnv_vco[8] = { 7003 [0] = 3200000, 7004 [1] = 4000000, 7005 [2] = 5333333, 7006 [3] = 4800000, 7007 [4] = 2666667, 7008 }; 7009 static const unsigned int cl_vco[8] = { 7010 [0] = 3200000, 7011 [1] = 4000000, 7012 [2] = 5333333, 7013 [3] = 6400000, 7014 [4] = 3333333, 7015 [5] = 3566667, 7016 [6] = 4266667, 7017 }; 7018 static const unsigned int elk_vco[8] = { 7019 [0] = 3200000, 7020 [1] = 4000000, 7021 [2] = 5333333, 7022 [3] = 4800000, 7023 }; 7024 static const unsigned int ctg_vco[8] = { 7025 [0] = 3200000, 7026 [1] = 4000000, 7027 [2] = 5333333, 7028 [3] = 6400000, 7029 [4] = 2666667, 7030 [5] = 4266667, 7031 }; 7032 const unsigned int *vco_table; 7033 unsigned int vco; 7034 uint8_t tmp = 0; 7035 7036 /* FIXME other chipsets? */ 7037 if (IS_GM45(dev)) 7038 vco_table = ctg_vco; 7039 else if (IS_G4X(dev)) 7040 vco_table = elk_vco; 7041 else if (IS_CRESTLINE(dev)) 7042 vco_table = cl_vco; 7043 else if (IS_PINEVIEW(dev)) 7044 vco_table = pnv_vco; 7045 else if (IS_G33(dev)) 7046 vco_table = blb_vco; 7047 else 7048 return 0; 7049 7050 tmp = I915_READ(IS_MOBILE(dev) ? 
HPLLVCO_MOBILE : HPLLVCO); 7051 7052 vco = vco_table[tmp & 0x7]; 7053 if (vco == 0) 7054 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); 7055 else 7056 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); 7057 7058 return vco; 7059 } 7060 7061 static int gm45_get_display_clock_speed(struct drm_device *dev) 7062 { 7063 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7064 uint16_t tmp = 0; 7065 7066 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7067 7068 cdclk_sel = (tmp >> 12) & 0x1; 7069 7070 switch (vco) { 7071 case 2666667: 7072 case 4000000: 7073 case 5333333: 7074 return cdclk_sel ? 333333 : 222222; 7075 case 3200000: 7076 return cdclk_sel ? 320000 : 228571; 7077 default: 7078 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp); 7079 return 222222; 7080 } 7081 } 7082 7083 static int i965gm_get_display_clock_speed(struct drm_device *dev) 7084 { 7085 static const uint8_t div_3200[] = { 16, 10, 8 }; 7086 static const uint8_t div_4000[] = { 20, 12, 10 }; 7087 static const uint8_t div_5333[] = { 24, 16, 14 }; 7088 const uint8_t *div_table; 7089 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7090 uint16_t tmp = 0; 7091 7092 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7093 7094 cdclk_sel = ((tmp >> 8) & 0x1f) - 1; 7095 7096 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7097 goto fail; 7098 7099 switch (vco) { 7100 case 3200000: 7101 div_table = div_3200; 7102 break; 7103 case 4000000: 7104 div_table = div_4000; 7105 break; 7106 case 5333333: 7107 div_table = div_5333; 7108 break; 7109 default: 7110 goto fail; 7111 } 7112 7113 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7114 7115 fail: 7116 DRM_ERROR("Unable to determine CDCLK. 
HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp); 7117 return 200000; 7118 } 7119 7120 static int g33_get_display_clock_speed(struct drm_device *dev) 7121 { 7122 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; 7123 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; 7124 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; 7125 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; 7126 const uint8_t *div_table; 7127 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7128 uint16_t tmp = 0; 7129 7130 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7131 7132 cdclk_sel = (tmp >> 4) & 0x7; 7133 7134 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7135 goto fail; 7136 7137 switch (vco) { 7138 case 3200000: 7139 div_table = div_3200; 7140 break; 7141 case 4000000: 7142 div_table = div_4000; 7143 break; 7144 case 4800000: 7145 div_table = div_4800; 7146 break; 7147 case 5333333: 7148 div_table = div_5333; 7149 break; 7150 default: 7151 goto fail; 7152 } 7153 7154 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7155 7156 fail: 7157 DRM_ERROR("Unable to determine CDCLK. 
HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp); 7158 return 190476; 7159 } 7160 7161 static void 7162 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 7163 { 7164 while (*num > DATA_LINK_M_N_MASK || 7165 *den > DATA_LINK_M_N_MASK) { 7166 *num >>= 1; 7167 *den >>= 1; 7168 } 7169 } 7170 7171 static void compute_m_n(unsigned int m, unsigned int n, 7172 uint32_t *ret_m, uint32_t *ret_n) 7173 { 7174 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7175 *ret_m = div_u64((uint64_t) m * *ret_n, n); 7176 intel_reduce_m_n_ratio(ret_m, ret_n); 7177 } 7178 7179 void 7180 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 7181 int pixel_clock, int link_clock, 7182 struct intel_link_m_n *m_n) 7183 { 7184 m_n->tu = 64; 7185 7186 compute_m_n(bits_per_pixel * pixel_clock, 7187 link_clock * nlanes * 8, 7188 &m_n->gmch_m, &m_n->gmch_n); 7189 7190 compute_m_n(pixel_clock, link_clock, 7191 &m_n->link_m, &m_n->link_n); 7192 } 7193 7194 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7195 { 7196 if (i915.panel_use_ssc >= 0) 7197 return i915.panel_use_ssc != 0; 7198 return dev_priv->vbt.lvds_use_ssc 7199 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7200 } 7201 7202 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 7203 { 7204 return (1 << dpll->n) << 16 | dpll->m2; 7205 } 7206 7207 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 7208 { 7209 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7210 } 7211 7212 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7213 struct intel_crtc_state *crtc_state, 7214 struct dpll *reduced_clock) 7215 { 7216 struct drm_device *dev = crtc->base.dev; 7217 u32 fp, fp2 = 0; 7218 7219 if (IS_PINEVIEW(dev)) { 7220 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7221 if (reduced_clock) 7222 fp2 = pnv_dpll_compute_fp(reduced_clock); 7223 } else { 7224 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7225 if (reduced_clock) 7226 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7227 } 7228 7229 
crtc_state->dpll_hw_state.fp0 = fp; 7230 7231 crtc->lowfreq_avail = false; 7232 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) && 7233 reduced_clock) { 7234 crtc_state->dpll_hw_state.fp1 = fp2; 7235 crtc->lowfreq_avail = true; 7236 } else { 7237 crtc_state->dpll_hw_state.fp1 = fp; 7238 } 7239 } 7240 7241 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 7242 pipe) 7243 { 7244 u32 reg_val; 7245 7246 /* 7247 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7248 * and set it to a reasonable value instead. 7249 */ 7250 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7251 reg_val &= 0xffffff00; 7252 reg_val |= 0x00000030; 7253 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7254 7255 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7256 reg_val &= 0x8cffffff; 7257 reg_val = 0x8c000000; 7258 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7259 7260 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1)); 7261 reg_val &= 0xffffff00; 7262 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val); 7263 7264 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13); 7265 reg_val &= 0x00ffffff; 7266 reg_val |= 0xb0000000; 7267 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val); 7268 } 7269 7270 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, 7271 struct intel_link_m_n *m_n) 7272 { 7273 struct drm_device *dev = crtc->base.dev; 7274 struct drm_i915_private *dev_priv = to_i915(dev); 7275 int pipe = crtc->pipe; 7276 7277 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7278 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); 7279 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m); 7280 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n); 7281 } 7282 7283 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc, 7284 struct intel_link_m_n *m_n, 7285 struct intel_link_m_n *m2_n2) 7286 { 7287 struct drm_device *dev = crtc->base.dev; 7288 struct 
drm_i915_private *dev_priv = to_i915(dev); 7289 int pipe = crtc->pipe; 7290 enum transcoder transcoder = crtc->config->cpu_transcoder; 7291 7292 if (INTEL_INFO(dev)->gen >= 5) { 7293 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m); 7294 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n); 7295 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m); 7296 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n); 7297 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available 7298 * for gen < 8) and if DRRS is supported (to make sure the 7299 * registers are not unnecessarily accessed). 7300 */ 7301 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) && 7302 crtc->config->has_drrs) { 7303 I915_WRITE(PIPE_DATA_M2(transcoder), 7304 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); 7305 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n); 7306 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m); 7307 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n); 7308 } 7309 } else { 7310 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7311 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n); 7312 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m); 7313 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n); 7314 } 7315 } 7316 7317 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n) 7318 { 7319 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; 7320 7321 if (m_n == M1_N1) { 7322 dp_m_n = &crtc->config->dp_m_n; 7323 dp_m2_n2 = &crtc->config->dp_m2_n2; 7324 } else if (m_n == M2_N2) { 7325 7326 /* 7327 * M2_N2 registers are not supported. Hence m2_n2 divider value 7328 * needs to be programmed into M1_N1. 
7329 */ 7330 dp_m_n = &crtc->config->dp_m2_n2; 7331 } else { 7332 DRM_ERROR("Unsupported divider value\n"); 7333 return; 7334 } 7335 7336 if (crtc->config->has_pch_encoder) 7337 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n); 7338 else 7339 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2); 7340 } 7341 7342 static void vlv_compute_dpll(struct intel_crtc *crtc, 7343 struct intel_crtc_state *pipe_config) 7344 { 7345 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 7346 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7347 if (crtc->pipe != PIPE_A) 7348 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7349 7350 /* DPLL not used with DSI, but still need the rest set up */ 7351 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7352 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7353 DPLL_EXT_BUFFER_ENABLE_VLV; 7354 7355 pipe_config->dpll_hw_state.dpll_md = 7356 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7357 } 7358 7359 static void chv_compute_dpll(struct intel_crtc *crtc, 7360 struct intel_crtc_state *pipe_config) 7361 { 7362 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV | 7363 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 7364 if (crtc->pipe != PIPE_A) 7365 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7366 7367 /* DPLL not used with DSI, but still need the rest set up */ 7368 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI)) 7369 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7370 7371 pipe_config->dpll_hw_state.dpll_md = 7372 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7373 } 7374 7375 static void vlv_prepare_pll(struct intel_crtc *crtc, 7376 const struct intel_crtc_state *pipe_config) 7377 { 7378 struct drm_device *dev = crtc->base.dev; 7379 struct drm_i915_private *dev_priv = to_i915(dev); 7380 enum i915_pipe pipe = crtc->pipe; 7381 u32 mdiv; 7382 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7383 u32 coreclk, reg_val; 7384 
7385 /* Enable Refclk */ 7386 I915_WRITE(DPLL(pipe), 7387 pipe_config->dpll_hw_state.dpll & 7388 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV)); 7389 7390 /* No need to actually set up the DPLL with DSI */ 7391 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7392 return; 7393 7394 mutex_lock(&dev_priv->sb_lock); 7395 7396 bestn = pipe_config->dpll.n; 7397 bestm1 = pipe_config->dpll.m1; 7398 bestm2 = pipe_config->dpll.m2; 7399 bestp1 = pipe_config->dpll.p1; 7400 bestp2 = pipe_config->dpll.p2; 7401 7402 /* See eDP HDMI DPIO driver vbios notes doc */ 7403 7404 /* PLL B needs special handling */ 7405 if (pipe == PIPE_B) 7406 vlv_pllb_recal_opamp(dev_priv, pipe); 7407 7408 /* Set up Tx target for periodic Rcomp update */ 7409 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7410 7411 /* Disable target IRef on PLL */ 7412 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7413 reg_val &= 0x00ffffff; 7414 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7415 7416 /* Disable fast lock */ 7417 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7418 7419 /* Set idtafcrecal before PLL is enabled */ 7420 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7421 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7422 mdiv |= ((bestn << DPIO_N_SHIFT)); 7423 mdiv |= (1 << DPIO_K_SHIFT); 7424 7425 /* 7426 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS, 7427 * but we don't support that). 7428 * Note: don't use the DAC post divider as it seems unstable. 
7429 */ 7430 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7431 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7432 7433 mdiv |= DPIO_ENABLE_CALIBRATION; 7434 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7435 7436 /* Set HBR and RBR LPF coefficients */ 7437 if (pipe_config->port_clock == 162000 || 7438 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) || 7439 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) 7440 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7441 0x009f0003); 7442 else 7443 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7444 0x00d0000f); 7445 7446 if (intel_crtc_has_dp_encoder(pipe_config)) { 7447 /* Use SSC source */ 7448 if (pipe == PIPE_A) 7449 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7450 0x0df40000); 7451 else 7452 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7453 0x0df70000); 7454 } else { /* HDMI or VGA */ 7455 /* Use bend source */ 7456 if (pipe == PIPE_A) 7457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7458 0x0df70000); 7459 else 7460 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7461 0x0df40000); 7462 } 7463 7464 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7465 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7466 if (intel_crtc_has_dp_encoder(crtc->config)) 7467 coreclk |= 0x01000000; 7468 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7469 7470 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7471 mutex_unlock(&dev_priv->sb_lock); 7472 } 7473 7474 static void chv_prepare_pll(struct intel_crtc *crtc, 7475 const struct intel_crtc_state *pipe_config) 7476 { 7477 struct drm_device *dev = crtc->base.dev; 7478 struct drm_i915_private *dev_priv = to_i915(dev); 7479 enum i915_pipe pipe = crtc->pipe; 7480 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7481 u32 loopfilter, tribuf_calcntr; 7482 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 7483 u32 dpio_val; 7484 int vco; 7485 7486 /* Enable Refclk and SSC */ 7487 
I915_WRITE(DPLL(pipe), 7488 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 7489 7490 /* No need to actually set up the DPLL with DSI */ 7491 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0) 7492 return; 7493 7494 bestn = pipe_config->dpll.n; 7495 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 7496 bestm1 = pipe_config->dpll.m1; 7497 bestm2 = pipe_config->dpll.m2 >> 22; 7498 bestp1 = pipe_config->dpll.p1; 7499 bestp2 = pipe_config->dpll.p2; 7500 vco = pipe_config->dpll.vco; 7501 dpio_val = 0; 7502 loopfilter = 0; 7503 7504 mutex_lock(&dev_priv->sb_lock); 7505 7506 /* p1 and p2 divider */ 7507 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 7508 5 << DPIO_CHV_S1_DIV_SHIFT | 7509 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 7510 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 7511 1 << DPIO_CHV_K_DIV_SHIFT); 7512 7513 /* Feedback post-divider - m2 */ 7514 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 7515 7516 /* Feedback refclk divider - n and m1 */ 7517 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 7518 DPIO_CHV_M1_DIV_BY_2 | 7519 1 << DPIO_CHV_N_DIV_SHIFT); 7520 7521 /* M2 fraction division */ 7522 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 7523 7524 /* M2 fraction division enable */ 7525 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 7526 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); 7527 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); 7528 if (bestm2_frac) 7529 dpio_val |= DPIO_CHV_FRAC_DIV_EN; 7530 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); 7531 7532 /* Program digital lock detect threshold */ 7533 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); 7534 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | 7535 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); 7536 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); 7537 if (!bestm2_frac) 7538 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; 7539 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); 7540 7541 /* Loop 
filter */ 7542 if (vco == 5400000) { 7543 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); 7544 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); 7545 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); 7546 tribuf_calcntr = 0x9; 7547 } else if (vco <= 6200000) { 7548 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); 7549 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); 7550 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7551 tribuf_calcntr = 0x9; 7552 } else if (vco <= 6480000) { 7553 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7554 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7555 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7556 tribuf_calcntr = 0x8; 7557 } else { 7558 /* Not supported. Apply the same limits as in the max case */ 7559 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); 7560 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); 7561 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); 7562 tribuf_calcntr = 0; 7563 } 7564 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); 7565 7566 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); 7567 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; 7568 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); 7569 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); 7570 7571 /* AFC Recal */ 7572 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), 7573 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | 7574 DPIO_AFC_RECAL); 7575 7576 mutex_unlock(&dev_priv->sb_lock); 7577 } 7578 7579 /** 7580 * vlv_force_pll_on - forcibly enable just the PLL 7581 * @dev_priv: i915 private structure 7582 * @pipe: pipe PLL to enable 7583 * @dpll: PLL configuration 7584 * 7585 * Enable the PLL for @pipe using the supplied @dpll config. To be used 7586 * in cases where we need the PLL enabled even when @pipe is not going to 7587 * be enabled. 
 */
int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	struct intel_crtc_state *pipe_config;

	/* Build a minimal stand-alone state; the crtc's own config is
	 * deliberately left untouched. */
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. To be used in cases where we need to
 * disable the PLL even when @pipe itself was never fully enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/*
 * Compute the gen3/4-style DPLL register state (mode, P1/P2 bitmasks,
 * pixel multiplier, reference select) into crtc_state->dpll_hw_state.
 * @reduced_clock, when non-NULL on G4X, programs the downclocked P1.
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	/* SDVO/HDMI and DP all want the high-speed clock mode */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock select: TV clock, spread-spectrum LVDS, or DREF */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

/*
 * Compute the gen2-style DPLL register state into
 * crtc_state->dpll_hw_state (gen2 has a different P1/P2 encoding and
 * no DPLL_MD register).
 */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

/*
 * Write the H/V timing registers for the crtc's cpu transcoder from the
 * adjusted mode in intel_crtc->config.
 */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Registers pack (start - 1) in the low half and (end - 1) high */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits.
	 */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

}

/* Program PIPESRC with the (user-requested) source size to scale from. */
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = intel_crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

/*
 * Read the H/V timing registers back into pipe_config's adjusted mode,
 * undoing the (value - 1) encoding and the interlace halfline
 * adjustment applied by intel_set_pipe_timings().
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}
}

/* Read PIPESRC back into pipe_config (and mirror it into base.mode). */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

/*
 * Fill a drm_display_mode from the crtc_* timing fields of
 * pipe_config's adjusted mode (the inverse of the crtc_* fixup).
 */
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	/* NOTE(review): this |= is redundant -- flags was already assigned
	 * the same value above. Harmless, but could be dropped. */
	mode->flags |= pipe_config->base.adjusted_mode.flags;

	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}

/*
 * Compute and write PIPECONF for a gmch (pre-PCH) pipe: double-wide,
 * dither/bpc (g4x+), CxSR downclocking, interlace mode and color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t pipeconf;

	pipeconf = 0;

	/* Keep the force-enabled bit when a quirk pins the pipe on */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither.
			 */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/*
 * Pick gen2 PLL limits by output type, find dividers for the requested
 * port clock, and fill in the DPLL hw state. Returns 0 or -EINVAL if no
 * divider combination hits the target clock.
 */
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 48000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i8xx_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
		limit = &intel_limits_i8xx_dvo;
	} else {
		limit = &intel_limits_i8xx_dac;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i8xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/*
 * G4X variant of the clock computation: selects among the dual/single
 * channel LVDS, HDMI/analog and SDVO limit tables.
 */
static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else {
		/* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/* Pineview variant: its own LVDS/SDVO limits and divider search. */
static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_pineview_lvds;
	} else {
		limit = &intel_limits_pineview_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/* Generic i9xx variant: LVDS vs SDVO limits with a 96 MHz refclk. */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct intel_limit *limit;
	int refclk = 96000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			refclk = dev_priv->vbt.lvds_ssc_freq;
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
		}

		limit = &intel_limits_i9xx_lvds;
	} else {
		limit = &intel_limits_i9xx_sdvo;
	}

	if (!crtc_state->clock_set &&
	    !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				 refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	i9xx_compute_dpll(crtc, crtc_state, NULL);

	return 0;
}

/* CHV: single limit table, 100 MHz refclk, then chv_compute_dpll(). */
static int chv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_chv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	chv_compute_dpll(crtc, crtc_state);

	return 0;
}

/* VLV: single limit table, 100 MHz refclk, then vlv_compute_dpll(). */
static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_vlv;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	if (!crtc_state->clock_set &&
	    !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	vlv_compute_dpll(crtc, crtc_state);

	return 0;
}

/*
 * Read out the gmch panel fitter state into pipe_config, but only if
 * the fitter is enabled and attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe.
	 */
	if (INTEL_INFO(dev)->gen < 4) {
		/* pre-gen4: the fitter is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}

/*
 * Read the VLV PLL dividers back over DPIO and recompute the port clock
 * into pipe_config.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	struct dpll clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

/*
 * Reconstruct a framebuffer description from the plane registers as
 * currently programmed (e.g. by the BIOS at boot), filling in
 * @plane_config. Bails out silently if the plane is disabled or the
 * framebuffer struct cannot be allocated.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read the CHV PLL dividers back over DPIO (m2 reassembled as 10.22
 * fixed point) and recompute the port clock into pipe_config.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	struct dpll clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	/* In case of DSI, DPLL will not be used */
	if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
		return;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
}

/*
 * Read out the full pipe hw state (PIPECONF, timings, pfit, DPLL,
 * pixel multiplier, clock) into pipe_config. Returns true if the pipe
 * is powered and enabled; takes/releases the pipe power domain.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No way to read it out on pipes B and C */
		if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
			tmp = dev_priv->chv_dpll_md[crtc->pipe];
		else
			tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits.
*/
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Program the PCH display reference clock (PCH_DREF_CONTROL) on
 * ILK/SNB/IVB class PCHs, enabling/disabling the SSC and CPU eDP
 * clock sources one step at a time as required by the hardware.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	int i;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;
	bool using_ssc_source = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		/* On IBX, SSC availability follows the CK505 strap from VBT. */
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	/* Check if any DPLLs are using the SSC source */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if (!(temp & DPLL_VCO_ENABLE))
			continue;

		if ((temp & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
			using_ssc_source = true;
			break;
		}
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
		      has_panel, has_lvds, has_ck505, using_ssc_source);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else if (using_ssc_source) {
		/* Keep SSC alive for any DPLL that is already using it. */
		final |= DREF_SSC_SOURCE_ENABLE;
		final |= DREF_SSC1_ENABLE;
	}

	/* Nothing to do if the hardware already matches the wanted state. */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on
panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling CPU source output\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		if (!using_ssc_source) {
			DRM_DEBUG_KMS("Disabling SSC source\n");

			/* Turn off the SSC source */
			val &= ~DREF_SSC_SOURCE_MASK;
			val |= DREF_SSC_SOURCE_DISABLE;

			/* Turn off SSC1 */
			val &= ~DREF_SSC1_ENABLE;

			I915_WRITE(PCH_DREF_CONTROL, val);
			POSTING_READ(PCH_DREF_CONTROL);
			udelay(200);
		}
	}

	/* The stepwise updates above must have converged on 'final'. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY reset via the IOSF sideband control bit and wait
 * for the status bit to assert, then de-assert (100 us timeout each).
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
			FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
			 FDI_MPHY_IOSFSB_RESET_STATUS)
== 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/*
	 * The register offset/value pairs below come straight from the
	 * WaMPhyProgramming workaround table; do not "clean up" or
	 * symbolize the magic constants.
	 */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4,
SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 * - Sequence to enable CLKOUT_DP
 * - Sequence to enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	/* Sanitize impossible parameter combinations rather than obeying them. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	reg = HAS_PCH_LPT_LP(dev) ?
SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* PATHALT must be asserted before the clock may be disabled. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend step count (-50..50, multiples of 5) to a table index 0..20. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

/* SSCDIVINTPHASE values indexed by BEND_IDX(steps). */
static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend
(135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 need the dither phase pattern enabled. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * LPT/WPT refclk init: enable (unbent) CLKOUT_DP with spread and FDI
 * programming when a VGA encoder is present, otherwise disable it.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga) {
		lpt_bend_clkout_dp(to_i915(dev), 0);
		lpt_enable_clkout_dp(dev, true, true);
	} else {
		lpt_disable_clkout_dp(dev);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/* Program PIPECONF for an ILK-style pipe from the staged crtc config. */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/* Program PIPECONF for a HSW+ transcoder (only dither/interlace bits). */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	u32 val = 0;

	if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));
}

/* Program PIPEMISC (BDW/gen9+): per-pipe bpc and dithering. */
static void haswell_set_pipemisc(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
		u32 val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp.
*/
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
	}
}

/* Minimum FDI lane count needed to carry @target_clock at @bpp over @link_bw. */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True when the effective M/N ratio is low enough to want FP_CB_TUNE. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Fill crtc_state->dpll_hw_state (DPLL/FP0/FP1) for an Ironlake PCH PLL
 * from the already-computed divider values in crtc_state->dpll.
 * @reduced_clock: optional lower-power clock used to compute FP1.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll, fp, fp2;
	int factor;

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<<
PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (intel_crtc_has_dp_encoder(crtc_state))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/*
	 * The high speed IO clock is only really required for
	 * SDVO/HDMI/DP, but we also enable it for CRT to make it
	 * possible to share the DPLL between CRT and HDMI. Enabling
	 * the clock needlessly does no real harm, except use up a
	 * bit of power potentially.
	 *
	 * We'll limit this to IVB with 3 pipes, since it has only two
	 * DPLLs and so DPLL sharing is the only way to get three pipes
	 * driving PCH ports at the same time. On SNB we could do this,
	 * and potentially avoid enabling the second DPLL, but it's not
	 * clear if it's a win or loss power wise. No point in doing
	 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
	 */
	if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels may use spread spectrum from the PCH refclk. */
	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}

/*
 * Compute PLL dividers and reserve a shared DPLL for an ILK-style crtc.
 * Returns 0 on success, -EINVAL if no divider settings or no free
 * shared DPLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	/* NOTE(review): never set true here, so the lowfreq path is dead */
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Pick divider limits matching link config and refclk. */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ?
&reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}

/* Read back link/data M/N values from the PCH transcoder registers. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back link/data M/N (and, when requested and supported, the DRRS
 * M2/N2 set) from the CPU transcoder registers.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
9163 */ 9164 if (m2_n2 && INTEL_INFO(dev)->gen < 8 && 9165 crtc->config->has_drrs) { 9166 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder)); 9167 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder)); 9168 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder)) 9169 & ~TU_SIZE_MASK; 9170 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder)); 9171 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder)) 9172 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9173 } 9174 } else { 9175 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe)); 9176 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe)); 9177 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe)) 9178 & ~TU_SIZE_MASK; 9179 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe)); 9180 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe)) 9181 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; 9182 } 9183 } 9184 9185 void intel_dp_get_m_n(struct intel_crtc *crtc, 9186 struct intel_crtc_state *pipe_config) 9187 { 9188 if (pipe_config->has_pch_encoder) 9189 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); 9190 else 9191 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9192 &pipe_config->dp_m_n, 9193 &pipe_config->dp_m2_n2); 9194 } 9195 9196 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc, 9197 struct intel_crtc_state *pipe_config) 9198 { 9199 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, 9200 &pipe_config->fdi_m_n, NULL); 9201 } 9202 9203 static void skylake_get_pfit_config(struct intel_crtc *crtc, 9204 struct intel_crtc_state *pipe_config) 9205 { 9206 struct drm_device *dev = crtc->base.dev; 9207 struct drm_i915_private *dev_priv = to_i915(dev); 9208 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9209 uint32_t ps_ctrl = 0; 9210 int id = -1; 9211 int i; 9212 9213 /* find scaler attached to this pipe */ 9214 for (i = 0; i < crtc->num_scalers; i++) { 9215 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i)); 9216 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) { 9217 id = i; 9218 
pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

/*
 * Reconstruct the framebuffer the BIOS left enabled on the SKL+ primary
 * plane so it can be inherited (fastboot). On failure the partially
 * filled fb is freed and plane_config->fb stays NULL.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base =
I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	kfree(fb);
}

/* Read back the PCH panel fitter state for this pipe into @pipe_config. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignments of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
*/
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Reconstruct the framebuffer the BIOS left enabled on the primary
 * plane (ILK through BDW) so it can be inherited (fastboot).
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb:
size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read out the current hardware state of an ILK/SNB/IVB pipe into
 * @pipe_config. Returns true iff the pipe is enabled. Holds the pipe
 * power domain reference only for the duration of the readout.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
9467 */ 9468 pll_id = (enum intel_dpll_id) crtc->pipe; 9469 } else { 9470 tmp = I915_READ(PCH_DPLL_SEL); 9471 if (tmp & TRANS_DPLLB_SEL(crtc->pipe)) 9472 pll_id = DPLL_ID_PCH_PLL_B; 9473 else 9474 pll_id= DPLL_ID_PCH_PLL_A; 9475 } 9476 9477 pipe_config->shared_dpll = 9478 intel_get_shared_dpll_by_id(dev_priv, pll_id); 9479 pll = pipe_config->shared_dpll; 9480 9481 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 9482 &pipe_config->dpll_hw_state)); 9483 9484 tmp = pipe_config->dpll_hw_state.dpll; 9485 pipe_config->pixel_multiplier = 9486 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK) 9487 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1; 9488 9489 ironlake_pch_clock_get(crtc, pipe_config); 9490 } else { 9491 pipe_config->pixel_multiplier = 1; 9492 } 9493 9494 intel_get_pipe_timings(crtc, pipe_config); 9495 intel_get_pipe_src_size(crtc, pipe_config); 9496 9497 ironlake_get_pfit_config(crtc, pipe_config); 9498 9499 ret = true; 9500 9501 out: 9502 intel_display_power_put(dev_priv, power_domain); 9503 9504 return ret; 9505 } 9506 9507 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9508 { 9509 struct drm_device *dev = &dev_priv->drm; 9510 struct intel_crtc *crtc; 9511 9512 for_each_intel_crtc(dev, crtc) 9513 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n", 9514 pipe_name(crtc->pipe)); 9515 9516 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n"); 9517 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n"); 9518 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n"); 9519 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n"); 9520 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n"); 9521 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE, 9522 "CPU PWM1 enabled\n"); 9523 if (IS_HASWELL(dev)) 9524 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE, 9525 "CPU PWM2 enabled\n"); 9526 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & 
			BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/*
 * Read the D_COMP register. The register lives at a different MMIO
 * address on Haswell (D_COMP_HSW) than on later platforms (D_COMP_BDW).
 */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/*
 * Write the D_COMP register. On Haswell the write must go through the
 * pcode mailbox (GEN6_PCODE_WRITE_D_COMP) under rps.hw_lock; elsewhere
 * it is a plain MMIO write, with a posting read to flush it.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = &dev_priv->drm;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
9574 */ 9575 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 9576 bool switch_to_fclk, bool allow_power_down) 9577 { 9578 uint32_t val; 9579 9580 assert_can_disable_lcpll(dev_priv); 9581 9582 val = I915_READ(LCPLL_CTL); 9583 9584 if (switch_to_fclk) { 9585 val |= LCPLL_CD_SOURCE_FCLK; 9586 I915_WRITE(LCPLL_CTL, val); 9587 9588 if (wait_for_us(I915_READ(LCPLL_CTL) & 9589 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 9590 DRM_ERROR("Switching to FCLK failed\n"); 9591 9592 val = I915_READ(LCPLL_CTL); 9593 } 9594 9595 val |= LCPLL_PLL_DISABLE; 9596 I915_WRITE(LCPLL_CTL, val); 9597 POSTING_READ(LCPLL_CTL); 9598 9599 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1)) 9600 DRM_ERROR("LCPLL still locked\n"); 9601 9602 val = hsw_read_dcomp(dev_priv); 9603 val |= D_COMP_COMP_DISABLE; 9604 hsw_write_dcomp(dev_priv, val); 9605 ndelay(100); 9606 9607 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 9608 1)) 9609 DRM_ERROR("D_COMP RCOMP still in progress\n"); 9610 9611 if (allow_power_down) { 9612 val = I915_READ(LCPLL_CTL); 9613 val |= LCPLL_POWER_DOWN_ALLOW; 9614 I915_WRITE(LCPLL_CTL, val); 9615 POSTING_READ(LCPLL_CTL); 9616 } 9617 } 9618 9619 /* 9620 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 9621 * source. 9622 */ 9623 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 9624 { 9625 uint32_t val; 9626 9627 val = I915_READ(LCPLL_CTL); 9628 9629 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 9630 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 9631 return; 9632 9633 /* 9634 * Make sure we're not on PC8 state before disabling PC8, otherwise 9635 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
9636 */ 9637 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 9638 9639 if (val & LCPLL_POWER_DOWN_ALLOW) { 9640 val &= ~LCPLL_POWER_DOWN_ALLOW; 9641 I915_WRITE(LCPLL_CTL, val); 9642 POSTING_READ(LCPLL_CTL); 9643 } 9644 9645 val = hsw_read_dcomp(dev_priv); 9646 val |= D_COMP_COMP_FORCE; 9647 val &= ~D_COMP_COMP_DISABLE; 9648 hsw_write_dcomp(dev_priv, val); 9649 9650 val = I915_READ(LCPLL_CTL); 9651 val &= ~LCPLL_PLL_DISABLE; 9652 I915_WRITE(LCPLL_CTL, val); 9653 9654 if (intel_wait_for_register(dev_priv, 9655 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK, 9656 5)) 9657 DRM_ERROR("LCPLL not locked yet\n"); 9658 9659 if (val & LCPLL_CD_SOURCE_FCLK) { 9660 val = I915_READ(LCPLL_CTL); 9661 val &= ~LCPLL_CD_SOURCE_FCLK; 9662 I915_WRITE(LCPLL_CTL, val); 9663 9664 if (wait_for_us((I915_READ(LCPLL_CTL) & 9665 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 9666 DRM_ERROR("Switching back to LCPLL failed\n"); 9667 } 9668 9669 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9670 intel_update_cdclk(&dev_priv->drm); 9671 } 9672 9673 /* 9674 * Package states C8 and deeper are really deep PC states that can only be 9675 * reached when all the devices on the system allow it, so even if the graphics 9676 * device allows PC8+, it doesn't mean the system will actually get to these 9677 * states. Our driver only allows PC8+ when going into runtime PM. 9678 * 9679 * The requirements for PC8+ are that all the outputs are disabled, the power 9680 * well is disabled and most interrupts are disabled, and these are also 9681 * requirements for runtime PM. When these conditions are met, we manually do 9682 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 9683 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 9684 * hang the machine. 9685 * 9686 * When we really reach PC8 or deeper states (not just when we allow it) we lose 9687 * the state of some registers, so when we come back from PC8+ we need to 9688 * restore this state. 
We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* On LPT-LP, allow the PCH to power down its clock partitions. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	/* Switch to FCLK and allow LCPLL power down. */
	hsw_disable_lcpll(dev_priv, true, true);
}

void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	/* Undo hsw_enable_pc8() in reverse order. */
	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

/* Commit the cdclk computed during the atomic check phase (Broxton). */
static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned int req_cdclk = old_intel_state->dev_cdclk;

	bxt_set_cdclk(to_i915(dev), req_cdclk);
}

/*
 * Raise the minimum pixel rate a pipe demands to satisfy BDW+ workarounds.
 * Returns the (possibly increased) pixel rate in kHz.
 */
static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
					  int pixel_rate)
{
	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);

	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

	/*
BSpec says "Do not use DisplayPort with CDCLK less than 9750 * 432 MHz, audio enabled, port width x4, and link rate 9751 * HBR2 (5.4 GHz), or else there may be audio corruption or 9752 * screen corruption." 9753 */ 9754 if (intel_crtc_has_dp_encoder(crtc_state) && 9755 crtc_state->has_audio && 9756 crtc_state->port_clock >= 540000 && 9757 crtc_state->lane_count == 4) 9758 pixel_rate = max(432000, pixel_rate); 9759 9760 return pixel_rate; 9761 } 9762 9763 /* compute the max rate for new configuration */ 9764 static int ilk_max_pixel_rate(struct drm_atomic_state *state) 9765 { 9766 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9767 struct drm_i915_private *dev_priv = to_i915(state->dev); 9768 struct drm_crtc *crtc; 9769 struct drm_crtc_state *cstate; 9770 struct intel_crtc_state *crtc_state; 9771 unsigned max_pixel_rate = 0, i; 9772 enum i915_pipe pipe; 9773 9774 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk, 9775 sizeof(intel_state->min_pixclk)); 9776 9777 for_each_crtc_in_state(state, crtc, cstate, i) { 9778 int pixel_rate; 9779 9780 crtc_state = to_intel_crtc_state(cstate); 9781 if (!crtc_state->base.enable) { 9782 intel_state->min_pixclk[i] = 0; 9783 continue; 9784 } 9785 9786 pixel_rate = ilk_pipe_pixel_rate(crtc_state); 9787 9788 if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv)) 9789 pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state, 9790 pixel_rate); 9791 9792 intel_state->min_pixclk[i] = pixel_rate; 9793 } 9794 9795 for_each_pipe(dev_priv, pipe) 9796 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate); 9797 9798 return max_pixel_rate; 9799 } 9800 9801 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) 9802 { 9803 struct drm_i915_private *dev_priv = to_i915(dev); 9804 uint32_t val, data; 9805 int ret; 9806 9807 if (WARN((I915_READ(LCPLL_CTL) & 9808 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK | 9809 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE | 9810 LCPLL_CD2X_CLOCK_DISABLE | 
		      LCPLL_POWER_DOWN_ALLOW |
		      LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask pcode for permission before touching the display frequency. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Park the CD clock on FCLK while we reprogram the divider. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* 'data' is the pcode encoding of the chosen frequency. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		/* note: "54O" (letter O) is the macro's actual spelling */
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Move the CD clock back off FCLK onto the newly programmed LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	/* Tell pcode which frequency we ended up on. */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ holds the frequency in MHz, minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}

/*
 * Pick the smallest supported BDW cdclk (kHz) that is at least as fast as
 * @max_pixclk. Supported steps: 337500, 450000, 540000, 675000.
 */
static int broadwell_calc_cdclk(int max_pixclk)
{
	if (max_pixclk > 540000)
		return 675000;
	else if (max_pixclk >
		 450000)
		return 540000;
	else if (max_pixclk > 337500)
		return 450000;
	else
		return 337500;
}

/*
 * Atomic-check hook: compute the cdclk required by the new state.
 * Returns 0 on success, -EINVAL if the needed cdclk exceeds the platform
 * maximum.
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = broadwell_calc_cdclk(max_pixclk);

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	/* With no active CRTCs the device can drop to the minimum cdclk. */
	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = broadwell_calc_cdclk(0);

	return 0;
}

/* Commit the cdclk computed during the atomic check phase (Broadwell). */
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	broadwell_set_cdclk(dev, req_cdclk);
}

/*
 * Atomic-check hook: compute the cdclk required by the new state (SKL).
 * Unlike the BDW variant this clamps to the maximum instead of failing.
 */
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	const int max_pixclk = ilk_max_pixel_rate(state);
	int vco = intel_state->cdclk_pll_vco;
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(max_pixclk, vco);

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
9945 */ 9946 if (cdclk > dev_priv->max_cdclk_freq) { 9947 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9948 cdclk, dev_priv->max_cdclk_freq); 9949 cdclk = dev_priv->max_cdclk_freq; 9950 } 9951 9952 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9953 if (!intel_state->active_crtcs) 9954 intel_state->dev_cdclk = skl_calc_cdclk(0, vco); 9955 9956 return 0; 9957 } 9958 9959 static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9960 { 9961 struct drm_i915_private *dev_priv = to_i915(old_state->dev); 9962 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state); 9963 unsigned int req_cdclk = intel_state->dev_cdclk; 9964 unsigned int req_vco = intel_state->cdclk_pll_vco; 9965 9966 skl_set_cdclk(dev_priv, req_cdclk, req_vco); 9967 } 9968 9969 static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9970 struct intel_crtc_state *crtc_state) 9971 { 9972 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) { 9973 if (!intel_ddi_pll_select(crtc, crtc_state)) 9974 return -EINVAL; 9975 } 9976 9977 crtc->lowfreq_avail = false; 9978 9979 return 0; 9980 } 9981 9982 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv, 9983 enum port port, 9984 struct intel_crtc_state *pipe_config) 9985 { 9986 enum intel_dpll_id id; 9987 9988 switch (port) { 9989 case PORT_A: 9990 pipe_config->ddi_pll_sel = SKL_DPLL0; 9991 id = DPLL_ID_SKL_DPLL0; 9992 break; 9993 case PORT_B: 9994 pipe_config->ddi_pll_sel = SKL_DPLL1; 9995 id = DPLL_ID_SKL_DPLL1; 9996 break; 9997 case PORT_C: 9998 pipe_config->ddi_pll_sel = SKL_DPLL2; 9999 id = DPLL_ID_SKL_DPLL2; 10000 break; 10001 default: 10002 DRM_ERROR("Incorrect port type\n"); 10003 return; 10004 } 10005 10006 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10007 } 10008 10009 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv, 10010 enum port port, 10011 struct intel_crtc_state *pipe_config) 10012 { 10013 enum intel_dpll_id id; 10014 u32 temp; 10015 
10016 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port); 10017 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1); 10018 10019 switch (pipe_config->ddi_pll_sel) { 10020 case SKL_DPLL0: 10021 id = DPLL_ID_SKL_DPLL0; 10022 break; 10023 case SKL_DPLL1: 10024 id = DPLL_ID_SKL_DPLL1; 10025 break; 10026 case SKL_DPLL2: 10027 id = DPLL_ID_SKL_DPLL2; 10028 break; 10029 case SKL_DPLL3: 10030 id = DPLL_ID_SKL_DPLL3; 10031 break; 10032 default: 10033 MISSING_CASE(pipe_config->ddi_pll_sel); 10034 return; 10035 } 10036 10037 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10038 } 10039 10040 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 10041 enum port port, 10042 struct intel_crtc_state *pipe_config) 10043 { 10044 enum intel_dpll_id id; 10045 10046 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port)); 10047 10048 switch (pipe_config->ddi_pll_sel) { 10049 case PORT_CLK_SEL_WRPLL1: 10050 id = DPLL_ID_WRPLL1; 10051 break; 10052 case PORT_CLK_SEL_WRPLL2: 10053 id = DPLL_ID_WRPLL2; 10054 break; 10055 case PORT_CLK_SEL_SPLL: 10056 id = DPLL_ID_SPLL; 10057 break; 10058 case PORT_CLK_SEL_LCPLL_810: 10059 id = DPLL_ID_LCPLL_810; 10060 break; 10061 case PORT_CLK_SEL_LCPLL_1350: 10062 id = DPLL_ID_LCPLL_1350; 10063 break; 10064 case PORT_CLK_SEL_LCPLL_2700: 10065 id = DPLL_ID_LCPLL_2700; 10066 break; 10067 default: 10068 MISSING_CASE(pipe_config->ddi_pll_sel); 10069 /* fall through */ 10070 case PORT_CLK_SEL_NONE: 10071 return; 10072 } 10073 10074 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id); 10075 } 10076 10077 static bool hsw_get_transcoder_state(struct intel_crtc *crtc, 10078 struct intel_crtc_state *pipe_config, 10079 unsigned long *power_domain_mask) 10080 { 10081 struct drm_device *dev = crtc->base.dev; 10082 struct drm_i915_private *dev_priv = to_i915(dev); 10083 enum intel_display_power_domain power_domain; 10084 u32 tmp; 10085 10086 /* 10087 * The pipe->transcoder mapping is fixed with the 
exception of the eDP 10088 * transcoder handled below. 10089 */ 10090 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 10091 10092 /* 10093 * XXX: Do intel_display_power_get_if_enabled before reading this (for 10094 * consistency and less surprising code; it's in always on power). 10095 */ 10096 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 10097 if (tmp & TRANS_DDI_FUNC_ENABLE) { 10098 enum i915_pipe trans_edp_pipe; 10099 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 10100 default: 10101 WARN(1, "unknown pipe linked to edp transcoder\n"); 10102 case TRANS_DDI_EDP_INPUT_A_ONOFF: 10103 case TRANS_DDI_EDP_INPUT_A_ON: 10104 trans_edp_pipe = PIPE_A; 10105 break; 10106 case TRANS_DDI_EDP_INPUT_B_ONOFF: 10107 trans_edp_pipe = PIPE_B; 10108 break; 10109 case TRANS_DDI_EDP_INPUT_C_ONOFF: 10110 trans_edp_pipe = PIPE_C; 10111 break; 10112 } 10113 10114 if (trans_edp_pipe == crtc->pipe) 10115 pipe_config->cpu_transcoder = TRANSCODER_EDP; 10116 } 10117 10118 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 10119 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10120 return false; 10121 *power_domain_mask |= BIT(power_domain); 10122 10123 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 10124 10125 return tmp & PIPECONF_ENABLE; 10126 } 10127 10128 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, 10129 struct intel_crtc_state *pipe_config, 10130 unsigned long *power_domain_mask) 10131 { 10132 struct drm_device *dev = crtc->base.dev; 10133 struct drm_i915_private *dev_priv = to_i915(dev); 10134 enum intel_display_power_domain power_domain; 10135 enum port port; 10136 enum transcoder cpu_transcoder; 10137 u32 tmp; 10138 10139 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10140 if (port == PORT_A) 10141 cpu_transcoder = TRANSCODER_DSI_A; 10142 else 10143 cpu_transcoder = TRANSCODER_DSI_C; 10144 10145 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); 10146 if 
(!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10147 continue; 10148 *power_domain_mask |= BIT(power_domain); 10149 10150 /* 10151 * The PLL needs to be enabled with a valid divider 10152 * configuration, otherwise accessing DSI registers will hang 10153 * the machine. See BSpec North Display Engine 10154 * registers/MIPI[BXT]. We can break out here early, since we 10155 * need the same DSI PLL to be enabled for both DSI ports. 10156 */ 10157 if (!intel_dsi_pll_is_enabled(dev_priv)) 10158 break; 10159 10160 /* XXX: this works for video mode only */ 10161 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port)); 10162 if (!(tmp & DPI_ENABLE)) 10163 continue; 10164 10165 tmp = I915_READ(MIPI_CTRL(port)); 10166 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe)) 10167 continue; 10168 10169 pipe_config->cpu_transcoder = cpu_transcoder; 10170 break; 10171 } 10172 10173 return transcoder_is_dsi(pipe_config->cpu_transcoder); 10174 } 10175 10176 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10177 struct intel_crtc_state *pipe_config) 10178 { 10179 struct drm_device *dev = crtc->base.dev; 10180 struct drm_i915_private *dev_priv = to_i915(dev); 10181 struct intel_shared_dpll *pll; 10182 enum port port; 10183 uint32_t tmp; 10184 10185 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 10186 10187 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 10188 10189 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 10190 skylake_get_ddi_pll(dev_priv, port, pipe_config); 10191 else if (IS_BROXTON(dev)) 10192 bxt_get_ddi_pll(dev_priv, port, pipe_config); 10193 else 10194 haswell_get_ddi_pll(dev_priv, port, pipe_config); 10195 10196 pll = pipe_config->shared_dpll; 10197 if (pll) { 10198 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll, 10199 &pipe_config->dpll_hw_state)); 10200 } 10201 10202 /* 10203 * Haswell has only FDI/PCH transcoder A. It is which is connected to 10204 * DDI E. 
So just check whether this pipe is wired to DDI E and whether 10205 * the PCH transcoder is on. 10206 */ 10207 if (INTEL_INFO(dev)->gen < 9 && 10208 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 10209 pipe_config->has_pch_encoder = true; 10210 10211 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 10212 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 10213 FDI_DP_PORT_WIDTH_SHIFT) + 1; 10214 10215 ironlake_get_fdi_m_n_config(crtc, pipe_config); 10216 } 10217 } 10218 10219 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 10220 struct intel_crtc_state *pipe_config) 10221 { 10222 struct drm_device *dev = crtc->base.dev; 10223 struct drm_i915_private *dev_priv = to_i915(dev); 10224 enum intel_display_power_domain power_domain; 10225 unsigned long power_domain_mask; 10226 bool active; 10227 10228 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 10229 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 10230 return false; 10231 power_domain_mask = BIT(power_domain); 10232 10233 pipe_config->shared_dpll = NULL; 10234 10235 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 10236 10237 if (IS_BROXTON(dev_priv) && 10238 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) { 10239 WARN_ON(active); 10240 active = true; 10241 } 10242 10243 if (!active) 10244 goto out; 10245 10246 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10247 haswell_get_ddi_port_state(crtc, pipe_config); 10248 intel_get_pipe_timings(crtc, pipe_config); 10249 } 10250 10251 intel_get_pipe_src_size(crtc, pipe_config); 10252 10253 pipe_config->gamma_mode = 10254 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 10255 10256 if (INTEL_INFO(dev)->gen >= 9) { 10257 skl_init_scalers(dev, crtc, pipe_config); 10258 } 10259 10260 if (INTEL_INFO(dev)->gen >= 9) { 10261 pipe_config->scaler_state.scaler_id = -1; 10262 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 10263 } 10264 10265 power_domain = 
POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 10266 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 10267 power_domain_mask |= BIT(power_domain); 10268 if (INTEL_INFO(dev)->gen >= 9) 10269 skylake_get_pfit_config(crtc, pipe_config); 10270 else 10271 ironlake_get_pfit_config(crtc, pipe_config); 10272 } 10273 10274 if (IS_HASWELL(dev)) 10275 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 10276 (I915_READ(IPS_CTL) & IPS_ENABLE); 10277 10278 if (pipe_config->cpu_transcoder != TRANSCODER_EDP && 10279 !transcoder_is_dsi(pipe_config->cpu_transcoder)) { 10280 pipe_config->pixel_multiplier = 10281 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10282 } else { 10283 pipe_config->pixel_multiplier = 1; 10284 } 10285 10286 out: 10287 for_each_power_domain(power_domain, power_domain_mask) 10288 intel_display_power_put(dev_priv, power_domain); 10289 10290 return active; 10291 } 10292 10293 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 10294 const struct intel_plane_state *plane_state) 10295 { 10296 struct drm_device *dev = crtc->dev; 10297 struct drm_i915_private *dev_priv = to_i915(dev); 10298 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10299 uint32_t cntl = 0, size = 0; 10300 10301 if (plane_state && plane_state->visible) { 10302 unsigned int width = plane_state->base.crtc_w; 10303 unsigned int height = plane_state->base.crtc_h; 10304 unsigned int stride = roundup_pow_of_two(width) * 4; 10305 10306 switch (stride) { 10307 default: 10308 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", 10309 width, stride); 10310 stride = 256; 10311 /* fallthrough */ 10312 case 256: 10313 case 512: 10314 case 1024: 10315 case 2048: 10316 break; 10317 } 10318 10319 cntl |= CURSOR_ENABLE | 10320 CURSOR_GAMMA_ENABLE | 10321 CURSOR_FORMAT_ARGB | 10322 CURSOR_STRIDE(stride); 10323 10324 size = (height << 12) | width; 10325 } 10326 10327 if (intel_crtc->cursor_cntl != 0 && 10328 (intel_crtc->cursor_base != base || 10329 
intel_crtc->cursor_size != size || 10330 intel_crtc->cursor_cntl != cntl)) { 10331 /* On these chipsets we can only modify the base/size/stride 10332 * whilst the cursor is disabled. 10333 */ 10334 I915_WRITE(CURCNTR(PIPE_A), 0); 10335 POSTING_READ(CURCNTR(PIPE_A)); 10336 intel_crtc->cursor_cntl = 0; 10337 } 10338 10339 if (intel_crtc->cursor_base != base) { 10340 I915_WRITE(CURBASE(PIPE_A), base); 10341 intel_crtc->cursor_base = base; 10342 } 10343 10344 if (intel_crtc->cursor_size != size) { 10345 I915_WRITE(CURSIZE, size); 10346 intel_crtc->cursor_size = size; 10347 } 10348 10349 if (intel_crtc->cursor_cntl != cntl) { 10350 I915_WRITE(CURCNTR(PIPE_A), cntl); 10351 POSTING_READ(CURCNTR(PIPE_A)); 10352 intel_crtc->cursor_cntl = cntl; 10353 } 10354 } 10355 10356 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, 10357 const struct intel_plane_state *plane_state) 10358 { 10359 struct drm_device *dev = crtc->dev; 10360 struct drm_i915_private *dev_priv = to_i915(dev); 10361 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10362 const struct skl_wm_values *wm = &dev_priv->wm.skl_results; 10363 int pipe = intel_crtc->pipe; 10364 uint32_t cntl = 0; 10365 10366 if (INTEL_GEN(dev_priv) >= 9 && wm->dirty_pipes & drm_crtc_mask(crtc)) 10367 skl_write_cursor_wm(intel_crtc, wm); 10368 10369 if (plane_state && plane_state->visible) { 10370 cntl = MCURSOR_GAMMA_ENABLE; 10371 switch (plane_state->base.crtc_w) { 10372 case 64: 10373 cntl |= CURSOR_MODE_64_ARGB_AX; 10374 break; 10375 case 128: 10376 cntl |= CURSOR_MODE_128_ARGB_AX; 10377 break; 10378 case 256: 10379 cntl |= CURSOR_MODE_256_ARGB_AX; 10380 break; 10381 default: 10382 MISSING_CASE(plane_state->base.crtc_w); 10383 return; 10384 } 10385 cntl |= pipe << 28; /* Connect to correct pipe */ 10386 10387 if (HAS_DDI(dev)) 10388 cntl |= CURSOR_PIPE_CSC_ENABLE; 10389 10390 if (plane_state->base.rotation == DRM_ROTATE_180) 10391 cntl |= CURSOR_ROTATE_180; 10392 } 10393 10394 if (intel_crtc->cursor_cntl != cntl) { 
10395 I915_WRITE(CURCNTR(pipe), cntl); 10396 POSTING_READ(CURCNTR(pipe)); 10397 intel_crtc->cursor_cntl = cntl; 10398 } 10399 10400 /* and commit changes on next vblank */ 10401 I915_WRITE(CURBASE(pipe), base); 10402 POSTING_READ(CURBASE(pipe)); 10403 10404 intel_crtc->cursor_base = base; 10405 } 10406 10407 /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ 10408 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 10409 const struct intel_plane_state *plane_state) 10410 { 10411 struct drm_device *dev = crtc->dev; 10412 struct drm_i915_private *dev_priv = to_i915(dev); 10413 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10414 int pipe = intel_crtc->pipe; 10415 u32 base = intel_crtc->cursor_addr; 10416 u32 pos = 0; 10417 10418 if (plane_state) { 10419 int x = plane_state->base.crtc_x; 10420 int y = plane_state->base.crtc_y; 10421 10422 if (x < 0) { 10423 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 10424 x = -x; 10425 } 10426 pos |= x << CURSOR_X_SHIFT; 10427 10428 if (y < 0) { 10429 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 10430 y = -y; 10431 } 10432 pos |= y << CURSOR_Y_SHIFT; 10433 10434 /* ILK+ do this automagically */ 10435 if (HAS_GMCH_DISPLAY(dev) && 10436 plane_state->base.rotation == DRM_ROTATE_180) { 10437 base += (plane_state->base.crtc_h * 10438 plane_state->base.crtc_w - 1) * 4; 10439 } 10440 } 10441 10442 I915_WRITE(CURPOS(pipe), pos); 10443 10444 if (IS_845G(dev) || IS_I865G(dev)) 10445 i845_update_cursor(crtc, base, plane_state); 10446 else 10447 i9xx_update_cursor(crtc, base, plane_state); 10448 } 10449 10450 static bool cursor_size_ok(struct drm_device *dev, 10451 uint32_t width, uint32_t height) 10452 { 10453 if (width == 0 || height == 0) 10454 return false; 10455 10456 /* 10457 * 845g/865g are special in that they are only limited by 10458 * the width of their cursors, the height is arbitrary up to 10459 * the precision of the register. 
Everything else requires 10460 * square cursors, limited to a few power-of-two sizes. 10461 */ 10462 if (IS_845G(dev) || IS_I865G(dev)) { 10463 if ((width & 63) != 0) 10464 return false; 10465 10466 if (width > (IS_845G(dev) ? 64 : 512)) 10467 return false; 10468 10469 if (height > 1023) 10470 return false; 10471 } else { 10472 switch (width | height) { 10473 case 256: 10474 case 128: 10475 if (IS_GEN2(dev)) 10476 return false; 10477 case 64: 10478 break; 10479 default: 10480 return false; 10481 } 10482 } 10483 10484 return true; 10485 } 10486 10487 /* VESA 640x480x72Hz mode to set on the pipe */ 10488 static struct drm_display_mode load_detect_mode = { 10489 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 10490 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 10491 }; 10492 10493 struct drm_framebuffer * 10494 __intel_framebuffer_create(struct drm_device *dev, 10495 struct drm_mode_fb_cmd2 *mode_cmd, 10496 struct drm_i915_gem_object *obj) 10497 { 10498 struct intel_framebuffer *intel_fb; 10499 int ret; 10500 10501 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 10502 if (!intel_fb) 10503 return ERR_PTR(-ENOMEM); 10504 10505 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 10506 if (ret) 10507 goto err; 10508 10509 return &intel_fb->base; 10510 10511 err: 10512 kfree(intel_fb); 10513 return ERR_PTR(ret); 10514 } 10515 10516 static struct drm_framebuffer * 10517 intel_framebuffer_create(struct drm_device *dev, 10518 struct drm_mode_fb_cmd2 *mode_cmd, 10519 struct drm_i915_gem_object *obj) 10520 { 10521 struct drm_framebuffer *fb; 10522 int ret; 10523 10524 ret = i915_mutex_lock_interruptible(dev); 10525 if (ret) 10526 return ERR_PTR(ret); 10527 fb = __intel_framebuffer_create(dev, mode_cmd, obj); 10528 mutex_unlock(&dev->struct_mutex); 10529 10530 return fb; 10531 } 10532 10533 static u32 10534 intel_framebuffer_pitch_for_width(int width, int bpp) 10535 { 10536 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 10537 
return ALIGN(pitch, 64); 10538 } 10539 10540 static u32 10541 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 10542 { 10543 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 10544 return PAGE_ALIGN(pitch * mode->vdisplay); 10545 } 10546 10547 static struct drm_framebuffer * 10548 intel_framebuffer_create_for_mode(struct drm_device *dev, 10549 struct drm_display_mode *mode, 10550 int depth, int bpp) 10551 { 10552 struct drm_framebuffer *fb; 10553 struct drm_i915_gem_object *obj; 10554 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10555 10556 obj = i915_gem_object_create(dev, 10557 intel_framebuffer_size_for_mode(mode, bpp)); 10558 if (IS_ERR(obj)) 10559 return ERR_CAST(obj); 10560 10561 mode_cmd.width = mode->hdisplay; 10562 mode_cmd.height = mode->vdisplay; 10563 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 10564 bpp); 10565 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 10566 10567 fb = intel_framebuffer_create(dev, &mode_cmd, obj); 10568 if (IS_ERR(fb)) 10569 drm_gem_object_unreference_unlocked(&obj->base); 10570 10571 return fb; 10572 } 10573 10574 static struct drm_framebuffer * 10575 mode_fits_in_fbdev(struct drm_device *dev, 10576 struct drm_display_mode *mode) 10577 { 10578 #ifdef CONFIG_DRM_FBDEV_EMULATION 10579 struct drm_i915_private *dev_priv = to_i915(dev); 10580 struct drm_i915_gem_object *obj; 10581 struct drm_framebuffer *fb; 10582 10583 if (!dev_priv->fbdev) 10584 return NULL; 10585 10586 if (!dev_priv->fbdev->fb) 10587 return NULL; 10588 10589 obj = dev_priv->fbdev->fb->obj; 10590 BUG_ON(!obj); 10591 10592 fb = &dev_priv->fbdev->fb->base; 10593 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 10594 fb->bits_per_pixel)) 10595 return NULL; 10596 10597 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 10598 return NULL; 10599 10600 drm_framebuffer_reference(fb); 10601 return fb; 10602 #else 10603 return NULL; 10604 #endif 10605 } 10606 10607 
/*
 * Prepare the primary plane state for a load-detect modeset: scan out @fb
 * full-screen at @mode's resolution, sourcing from (@x, @y). The src_*
 * fields are in 16.16 fixed point, hence the << 16. A NULL @mode sizes the
 * plane 0x0; a NULL @fb detaches the plane from the CRTC.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

/*
 * Light up a pipe/connector pair so @connector can be load-detected.
 * On success the atomic state needed to undo this is stashed in
 * old->restore_state for intel_release_load_detect_pipe(); returns false
 * when no pipe could be brought up. -EDEADLK from any locking call
 * triggers a full backoff and retry.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* the plane state above holds its own reference now */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Snapshot the pre-load-detect state so it can be restored later. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/* Undo intel_get_load_detect_pipe() by committing the saved restore state. */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder =
		&intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
		drm_atomic_state_free(state);
	}
}

/*
 * DPLL reference clock for the pipe described by @pipe_config — presumably
 * in kHz (120000/96000/48000; the VBT SSC frequency when spread spectrum is
 * selected as the reference input) — TODO confirm units against callers.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 is the reduced-clock divisor set; pick the active one. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 is encoded one-hot in the DPLL register; ffs() decodes it. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				      "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ?
			0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* guard the division below; 0 means no valid link configuration */
	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	/* timing registers hold (value - 1), low word active, high word total */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

/* Free an intel_crtc, cancelling any page-flip work still queued on it. */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred completion of a page flip (the unpin_work item): unpin the old
 * framebuffer, drop the references taken when the flip was queued, and
 * balance unpin_work_count.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* an MMIO flip must have fully executed before we can unpin */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Is 'a' after or equal to 'b'? (wrap-safe 32-bit counter comparison) */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

/*
 * Has a CS (ring-submitted) flip on @crtc actually landed in hardware?
 * Compares the live surface address and the g4x+ flip counter; see the
 * comments below for why both checks are needed.
 */
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned reset_counter;

	/* a GPU reset implicitly completes any outstanding flip */
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (crtc->reset_counter != reset_counter)
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->flip_work->flip_count);
}

static bool
__pageflip_finished_mmio(struct intel_crtc *crtc,
			 struct intel_flip_work *work)
{
	/*
	 * MMIO work completes when vblank is different from
	 * flip_queued_vblank.
	 *
	 * Reset counter value doesn't matter, this is handled by
	 * i915_wait_request finishing early, so no need to handle
	 * reset here.
	 */
	return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
}


/* Dispatch to the CS or MMIO completion check once the work is pending. */
static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	/* pairs with the barrier in intel_mark_page_flip_active() */
	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}

/* Complete a CS flip on @pipe if its work item has finished in hardware. */
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->flip_work;

	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(intel_crtc, work))
		page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/* Complete an MMIO flip on @pipe if its work item has finished. */
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
11222 */ 11223 spin_lock_irqsave(&dev->event_lock, flags); 11224 work = intel_crtc->flip_work; 11225 11226 if (work != NULL && 11227 is_mmio_work(work) && 11228 pageflip_finished(intel_crtc, work)) 11229 page_flip_completed(intel_crtc); 11230 11231 spin_unlock_irqrestore(&dev->event_lock, flags); 11232 } 11233 11234 static inline void intel_mark_page_flip_active(struct intel_crtc *crtc, 11235 struct intel_flip_work *work) 11236 { 11237 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc); 11238 11239 /* Ensure that the work item is consistent when activating it ... */ 11240 smp_mb__before_atomic(); 11241 atomic_set(&work->pending, 1); 11242 } 11243 11244 static int intel_gen2_queue_flip(struct drm_device *dev, 11245 struct drm_crtc *crtc, 11246 struct drm_framebuffer *fb, 11247 struct drm_i915_gem_object *obj, 11248 struct drm_i915_gem_request *req, 11249 uint32_t flags) 11250 { 11251 struct intel_engine_cs *engine = req->engine; 11252 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11253 u32 flip_mask; 11254 int ret; 11255 11256 ret = intel_ring_begin(req, 6); 11257 if (ret) 11258 return ret; 11259 11260 /* Can't queue multiple flips, so wait for the previous 11261 * one to finish before executing the next. 
11262 */ 11263 if (intel_crtc->plane) 11264 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 11265 else 11266 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 11267 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); 11268 intel_ring_emit(engine, MI_NOOP); 11269 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11270 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11271 intel_ring_emit(engine, fb->pitches[0]); 11272 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); 11273 intel_ring_emit(engine, 0); /* aux display base address, unused */ 11274 11275 return 0; 11276 } 11277 11278 static int intel_gen3_queue_flip(struct drm_device *dev, 11279 struct drm_crtc *crtc, 11280 struct drm_framebuffer *fb, 11281 struct drm_i915_gem_object *obj, 11282 struct drm_i915_gem_request *req, 11283 uint32_t flags) 11284 { 11285 struct intel_engine_cs *engine = req->engine; 11286 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11287 u32 flip_mask; 11288 int ret; 11289 11290 ret = intel_ring_begin(req, 6); 11291 if (ret) 11292 return ret; 11293 11294 if (intel_crtc->plane) 11295 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 11296 else 11297 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 11298 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask); 11299 intel_ring_emit(engine, MI_NOOP); 11300 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | 11301 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11302 intel_ring_emit(engine, fb->pitches[0]); 11303 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); 11304 intel_ring_emit(engine, MI_NOOP); 11305 11306 return 0; 11307 } 11308 11309 static int intel_gen4_queue_flip(struct drm_device *dev, 11310 struct drm_crtc *crtc, 11311 struct drm_framebuffer *fb, 11312 struct drm_i915_gem_object *obj, 11313 struct drm_i915_gem_request *req, 11314 uint32_t flags) 11315 { 11316 struct intel_engine_cs *engine = req->engine; 11317 struct drm_i915_private *dev_priv = to_i915(dev); 11318 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11319 uint32_t pf, pipesrc; 11320 int ret; 11321 
11322 ret = intel_ring_begin(req, 4); 11323 if (ret) 11324 return ret; 11325 11326 /* i965+ uses the linear or tiled offsets from the 11327 * Display Registers (which do not change across a page-flip) 11328 * so we need only reprogram the base address. 11329 */ 11330 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11331 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11332 intel_ring_emit(engine, fb->pitches[0]); 11333 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset | 11334 obj->tiling_mode); 11335 11336 /* XXX Enabling the panel-fitter across page-flip is so far 11337 * untested on non-native modes, so ignore it for now. 11338 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 11339 */ 11340 pf = 0; 11341 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11342 intel_ring_emit(engine, pf | pipesrc); 11343 11344 return 0; 11345 } 11346 11347 static int intel_gen6_queue_flip(struct drm_device *dev, 11348 struct drm_crtc *crtc, 11349 struct drm_framebuffer *fb, 11350 struct drm_i915_gem_object *obj, 11351 struct drm_i915_gem_request *req, 11352 uint32_t flags) 11353 { 11354 struct intel_engine_cs *engine = req->engine; 11355 struct drm_i915_private *dev_priv = to_i915(dev); 11356 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11357 uint32_t pf, pipesrc; 11358 int ret; 11359 11360 ret = intel_ring_begin(req, 4); 11361 if (ret) 11362 return ret; 11363 11364 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11365 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11366 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); 11367 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset); 11368 11369 /* Contrary to the suggestions in the documentation, 11370 * "Enable Panel Fitter" does not seem to be required when page 11371 * flipping with a non-native mode, and worse causes a normal 11372 * modeset to fail. 
11373 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 11374 */ 11375 pf = 0; 11376 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11377 intel_ring_emit(engine, pf | pipesrc); 11378 11379 return 0; 11380 } 11381 11382 static int intel_gen7_queue_flip(struct drm_device *dev, 11383 struct drm_crtc *crtc, 11384 struct drm_framebuffer *fb, 11385 struct drm_i915_gem_object *obj, 11386 struct drm_i915_gem_request *req, 11387 uint32_t flags) 11388 { 11389 struct intel_engine_cs *engine = req->engine; 11390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11391 uint32_t plane_bit = 0; 11392 int len, ret; 11393 11394 switch (intel_crtc->plane) { 11395 case PLANE_A: 11396 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 11397 break; 11398 case PLANE_B: 11399 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 11400 break; 11401 case PLANE_C: 11402 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 11403 break; 11404 default: 11405 WARN_ONCE(1, "unknown plane in flip command\n"); 11406 return -ENODEV; 11407 } 11408 11409 len = 4; 11410 if (engine->id == RCS) { 11411 len += 6; 11412 /* 11413 * On Gen 8, SRM is now taking an extra dword to accommodate 11414 * 48bits addresses, and we need a NOOP for the batch size to 11415 * stay even. 11416 */ 11417 if (IS_GEN8(dev)) 11418 len += 2; 11419 } 11420 11421 /* 11422 * BSpec MI_DISPLAY_FLIP for IVB: 11423 * "The full packet must be contained within the same cache line." 11424 * 11425 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 11426 * cacheline, if we ever start emitting more commands before 11427 * the MI_DISPLAY_FLIP we may need to first emit everything else, 11428 * then do the cacheline alignment, and finally emit the 11429 * MI_DISPLAY_FLIP. 11430 */ 11431 ret = intel_ring_cacheline_align(req); 11432 if (ret) 11433 return ret; 11434 11435 ret = intel_ring_begin(req, len); 11436 if (ret) 11437 return ret; 11438 11439 /* Unmask the flip-done completion message. 
Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (engine->id == RCS) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					  DERRMR_PIPEB_PRI_FLIP_DONE |
					  DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
					MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			/* second SRM address dword (48-bit) + pad to even size */
			intel_ring_emit(engine, 0);
			intel_ring_emit(engine, MI_NOOP);
		}
	}

	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, (MI_NOOP));

	return 0;
}

/*
 * Decide whether this flip should be done via MMIO rather than through
 * the command streamer. True when there is no engine, when forced by the
 * use_mmio_flip modparam or execlists, when a dma-buf fence is still
 * outstanding, or when the object was last written by a different engine.
 */
static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	struct reservation_object *resv;

	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;

	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv && !reservation_object_test_signaled_rcu(resv, false))
		return true;

	return engine != i915_gem_request_get_engine(obj->last_write_req);
}

/*
 * Perform an MMIO flip on skl+: reprogram tiling/stride and latch the new
 * surface address; the PLANE_SURF write arms the atomic update.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum i915_pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/*
 * Perform an MMIO flip on ilk..bdw: update the tiling bit in DSPCNTR and
 * latch the new surface address through DSPSURF.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/*
 * Worker for MMIO flips: wait for any outstanding render / dma-buf fence
 * on the new framebuffer, then perform the register-based flip inside a
 * pipe update section.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct
reservation_object *resv; 11596 11597 if (work->flip_queued_req) 11598 WARN_ON(__i915_wait_request(work->flip_queued_req, 11599 false, NULL, 11600 &dev_priv->rps.mmioflips)); 11601 11602 /* For framebuffer backed by dmabuf, wait for fence */ 11603 resv = i915_gem_object_get_dmabuf_resv(obj); 11604 if (resv) 11605 WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false, 11606 MAX_SCHEDULE_TIMEOUT) < 0); 11607 11608 intel_pipe_update_start(crtc); 11609 11610 if (INTEL_GEN(dev_priv) >= 9) 11611 skl_do_mmio_flip(crtc, work->rotation, work); 11612 else 11613 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 11614 ilk_do_mmio_flip(crtc, work); 11615 11616 intel_pipe_update_end(crtc, work); 11617 } 11618 11619 static int intel_default_queue_flip(struct drm_device *dev, 11620 struct drm_crtc *crtc, 11621 struct drm_framebuffer *fb, 11622 struct drm_i915_gem_object *obj, 11623 struct drm_i915_gem_request *req, 11624 uint32_t flags) 11625 { 11626 return -ENODEV; 11627 } 11628 11629 static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv, 11630 struct intel_crtc *intel_crtc, 11631 struct intel_flip_work *work) 11632 { 11633 u32 addr, vblank; 11634 11635 if (!atomic_read(&work->pending)) 11636 return false; 11637 11638 smp_rmb(); 11639 11640 vblank = intel_crtc_get_vblank_counter(intel_crtc); 11641 if (work->flip_ready_vblank == 0) { 11642 if (work->flip_queued_req && 11643 !i915_gem_request_completed(work->flip_queued_req)) 11644 return false; 11645 11646 work->flip_ready_vblank = vblank; 11647 } 11648 11649 if (vblank - work->flip_ready_vblank < 3) 11650 return false; 11651 11652 /* Potential stall - if we see that the flip has happened, 11653 * assume a missed interrupt. */ 11654 if (INTEL_GEN(dev_priv) >= 4) 11655 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11656 else 11657 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11658 11659 /* There is a potential issue here with a false positive after a flip 11660 * to the same address. 
We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

/*
 * Per-vblank check for stuck CS page flips on @pipe: completes a flip that
 * the hardware finished behind our backs, and boosts RPS for flips that
 * are taking more than one vblank.  Called from interrupt context.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;

	/* NOTE(port): in_interrupt() check disabled in this (DragonFly) port. */
//	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	/* lockmgr() takes dev->event_lock here; the rest of this file uses
	 * spin_lock_irq() on the same lock (DragonFly port idiom). */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
		page_flip_completed(intel_crtc);
		work = NULL;
	}

	/* Flip still pending after more than one vblank: ask RPS to boost
	 * the GPU so the flip request completes sooner. */
	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * Legacy (non-atomic) page-flip ioctl implementation: queue a flip of the
 * primary plane of @crtc to @fb, either via a command-stream flip or via
 * the MMIO flip worker, sending @event on completion.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but
double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these registers.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: don't let unpin work pile up more than two deep. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
		ret = -EIO;
		goto unlock;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine to emit the CS flip on (per-platform quirks). */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		engine = &dev_priv->engine[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		engine = &dev_priv->engine[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		engine = i915_gem_request_get_engine(obj->last_write_req);
		if (engine == NULL || engine->id != RCS)
			engine = &dev_priv->engine[BCS];
	} else {
		engine = &dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, engine, &request);
		if (!ret && !request) {
			request = i915_gem_request_alloc(engine, NULL);
			ret = PTR_ERR_OR_ZERO(request);
		}

		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);

		/* The worker waits for this request before flipping. */
		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);

		schedule_work(&work->mmio_work);
	} else {
		i915_gem_request_assign(&work->flip_queued_req, request);
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		intel_mark_page_flip_active(intel_crtc, work);

		i915_add_request_no_flush(request);
	}

	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* Error unwind: release resources in reverse order of acquisition. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
	if (!IS_ERR_OR_NULL(request))
		i915_add_request_no_flush(request);
	atomic_dec(&intel_crtc->unpin_work_count);
unlock:
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	/* GPU is terminally wedged: fall back to a full atomic commit so the
	 * flip still happens (just without GPU involvement). */
	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}


/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes. */
	if (new->visible != cur->visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
		return true;

	return false;
}

/*
 * Returns true when the plane state scales: source size (16.16 fixed point)
 * differs from destination size (integer pixels).
 */
static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->src) >> 16;
	int src_h = drm_rect_height(&state->src) >> 16;
	int dst_w = drm_rect_width(&state->dst);
	int dst_h = drm_rect_height(&state->dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Derive crtc-level flags (watermark updates, cxsr disable, fb_bits, ...)
 * from a plane state change during atomic check.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* SKL+ non-cursor planes may need a pipe scaler allocated/freed. */
	if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	/* Plane stays invisible: nothing to recompute. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id,
			 intel_crtc->base.name,
			 plane->base.id, plane->name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, plane->name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}

/*
 * Returns true when encoders @a and @b may be active on the same crtc.
 */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/*
 * Check that @encoder can be cloned with every other encoder that @state
 * routes to @crtc.
 */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/*
 * .atomic_check hook for intel crtcs: computes clocks, color management,
 * pipe/intermediate watermarks and SKL+ scaler assignments for the new
 * crtc state.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;

		/*
		 * Changing color management on Intel hardware is
		 * handled as part of planes update.
		 */
		crtc_state->planes_changed = true;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm set: reuse the optimal watermarks. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}

/* CRTC helper vtable shared by all intel crtcs. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

/*
 * Sync each connector's atomic state (best_encoder/crtc and its reference)
 * with the legacy connector->encoder pointers after a modeset.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}

/*
 * Clamp pipe_config->pipe_bpp to what the sink on @connector supports,
 * based on the EDID-reported bpc (3 bpp per bpc channel).
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to 8 on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0 && bpp > 24) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
			      bpp);
		pipe_config->pipe_bpp = 24;
	}
}

/*
 * Compute the starting pipe bpp from the platform's maximum, then clamp it
 * against every sink connected to @crtc in this atomic state.  Returns the
 * (pre-clamp) baseline bpp.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform maximum: 10bpc on G4X/VLV/CHV, 12bpc on gen5+. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

/* Dump the hardware crtc_* timing fields of @mode to the KMS debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		"type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/*
 * Dump a full intel_crtc_state (timings, link m/n values, pfit, DPLL state,
 * and all planes on the pipe) to the KMS debug log, for debugging modesets.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
		      crtc->base.base.id, crtc->base.name,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      intel_crtc_has_dp_encoder(pipe_config),
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL hardware state layout differs per platform family. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
			      plane->base.id, plane->name);
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->pixel_format));
		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
			      state->scaler_id,
			      state->src.x1 >> 16, state->src.y1 >> 16,
			      drm_rect_width(&state->src) >> 16,
			      drm_rect_height(&state->src) >> 16,
			      state->dst.x1, state->dst.y1,
			      drm_rect_width(&state->dst),
			      drm_rect_height(&state->dst));
	}
}

/*
 * Returns false when the atomic state would drive the same digital port
 * from more than one encoder, which the hardware cannot do.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Use the new state when this atomic update touches the
		 * connector, otherwise fall back to the current state. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through - DDI "unknown" outputs are digital */
		case INTEL_OUTPUT_DP:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fall through */
		default:
			break;
		}
	}

	return true;
}

/*
 * Zero a crtc_state while preserving the fields that must survive across
 * a compute_config pass (base state, scaler state, DPLL selection, ...).
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved.
*/ 12491 12492 tmp_state = crtc_state->base; 12493 scaler_state = crtc_state->scaler_state; 12494 shared_dpll = crtc_state->shared_dpll; 12495 dpll_hw_state = crtc_state->dpll_hw_state; 12496 ddi_pll_sel = crtc_state->ddi_pll_sel; 12497 force_thru = crtc_state->pch_pfit.force_thru; 12498 12499 memset(crtc_state, 0, sizeof *crtc_state); 12500 12501 crtc_state->base = tmp_state; 12502 crtc_state->scaler_state = scaler_state; 12503 crtc_state->shared_dpll = shared_dpll; 12504 crtc_state->dpll_hw_state = dpll_hw_state; 12505 crtc_state->ddi_pll_sel = ddi_pll_sel; 12506 crtc_state->pch_pfit.force_thru = force_thru; 12507 } 12508 12509 static int 12510 intel_modeset_pipe_config(struct drm_crtc *crtc, 12511 struct intel_crtc_state *pipe_config) 12512 { 12513 struct drm_atomic_state *state = pipe_config->base.state; 12514 struct intel_encoder *encoder; 12515 struct drm_connector *connector; 12516 struct drm_connector_state *connector_state; 12517 int base_bpp, ret = -EINVAL; 12518 int i; 12519 bool retry = true; 12520 12521 clear_intel_crtc_state(pipe_config); 12522 12523 pipe_config->cpu_transcoder = 12524 (enum transcoder) to_intel_crtc(crtc)->pipe; 12525 12526 /* 12527 * Sanitize sync polarity flags based on requested ones. If neither 12528 * positive or negative polarity is requested, treat this as meaning 12529 * negative polarity. 12530 */ 12531 if (!(pipe_config->base.adjusted_mode.flags & 12532 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC))) 12533 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC; 12534 12535 if (!(pipe_config->base.adjusted_mode.flags & 12536 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC))) 12537 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC; 12538 12539 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc), 12540 pipe_config); 12541 if (base_bpp < 0) 12542 goto fail; 12543 12544 /* 12545 * Determine the real pipe dimensions. 
/*
 * Compute the full pipe configuration for @crtc from the atomic @state
 * hanging off @pipe_config.  Clears the state, seeds defaults, lets every
 * encoder on the crtc adjust the mode via ->compute_config(), then runs
 * the crtc-level fixup.  A single bandwidth-constrained retry is allowed
 * (RETRY from intel_crtc_compute_config()).
 *
 * Returns 0 on success or a negative error code; on any failure the
 * partially-computed @pipe_config must not be used.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* allow exactly one RETRY round trip */

	clear_intel_crtc_state(pipe_config);

	/* Default the transcoder to the pipe; encoders may override (eDP). */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
			DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
			goto fail;
		}

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry round is permitted; a second RETRY is a bug. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:	/* also the success exit: ret is 0 when we fall through */
	return ret;
}
/*
 * Fuzzy equality for clock frequencies: two clocks match when they
 * differ by (roughly) less than 5% of their average.  Identical values
 * always match; a zero on either side never matches a non-zero value.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (clock1 == 0 || clock2 == 0)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	/*
	 * Integer-arithmetic form of delta / ((clock1 + clock2) / 2) < ~5%:
	 * ((delta + sum) * 100) / sum < 105.
	 */
	return ((delta + sum) * 100) / sum < 105;
}
intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12720 struct intel_link_m_n *m2_n2, 12721 bool adjust) 12722 { 12723 if (m_n->tu == m2_n2->tu && 12724 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12725 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 12726 intel_compare_m_n(m_n->link_m, m_n->link_n, 12727 m2_n2->link_m, m2_n2->link_n, !adjust)) { 12728 if (adjust) 12729 *m2_n2 = *m_n; 12730 12731 return true; 12732 } 12733 12734 return false; 12735 } 12736 12737 static bool 12738 intel_pipe_config_compare(struct drm_device *dev, 12739 struct intel_crtc_state *current_config, 12740 struct intel_crtc_state *pipe_config, 12741 bool adjust) 12742 { 12743 bool ret = true; 12744 12745 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ 12746 do { \ 12747 if (!adjust) \ 12748 DRM_ERROR(fmt, ##__VA_ARGS__); \ 12749 else \ 12750 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ 12751 } while (0) 12752 12753 #define PIPE_CONF_CHECK_X(name) \ 12754 if (current_config->name != pipe_config->name) { \ 12755 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12756 "(expected 0x%08x, found 0x%08x)\n", \ 12757 current_config->name, \ 12758 pipe_config->name); \ 12759 ret = false; \ 12760 } 12761 12762 #define PIPE_CONF_CHECK_I(name) \ 12763 if (current_config->name != pipe_config->name) { \ 12764 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12765 "(expected %i, found %i)\n", \ 12766 current_config->name, \ 12767 pipe_config->name); \ 12768 ret = false; \ 12769 } 12770 12771 #define PIPE_CONF_CHECK_P(name) \ 12772 if (current_config->name != pipe_config->name) { \ 12773 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12774 "(expected %p, found %p)\n", \ 12775 current_config->name, \ 12776 pipe_config->name); \ 12777 ret = false; \ 12778 } 12779 12780 #define PIPE_CONF_CHECK_M_N(name) \ 12781 if (!intel_compare_link_m_n(¤t_config->name, \ 12782 &pipe_config->name,\ 12783 adjust)) { \ 12784 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12785 "(expected tu %i gmch %i/%i link %i/%i, " \ 12786 "found tu %i, 
gmch %i/%i link %i/%i)\n", \ 12787 current_config->name.tu, \ 12788 current_config->name.gmch_m, \ 12789 current_config->name.gmch_n, \ 12790 current_config->name.link_m, \ 12791 current_config->name.link_n, \ 12792 pipe_config->name.tu, \ 12793 pipe_config->name.gmch_m, \ 12794 pipe_config->name.gmch_n, \ 12795 pipe_config->name.link_m, \ 12796 pipe_config->name.link_n); \ 12797 ret = false; \ 12798 } 12799 12800 /* This is required for BDW+ where there is only one set of registers for 12801 * switching between high and low RR. 12802 * This macro can be used whenever a comparison has to be made between one 12803 * hw state and multiple sw state variables. 12804 */ 12805 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 12806 if (!intel_compare_link_m_n(¤t_config->name, \ 12807 &pipe_config->name, adjust) && \ 12808 !intel_compare_link_m_n(¤t_config->alt_name, \ 12809 &pipe_config->name, adjust)) { \ 12810 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12811 "(expected tu %i gmch %i/%i link %i/%i, " \ 12812 "or tu %i gmch %i/%i link %i/%i, " \ 12813 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12814 current_config->name.tu, \ 12815 current_config->name.gmch_m, \ 12816 current_config->name.gmch_n, \ 12817 current_config->name.link_m, \ 12818 current_config->name.link_n, \ 12819 current_config->alt_name.tu, \ 12820 current_config->alt_name.gmch_m, \ 12821 current_config->alt_name.gmch_n, \ 12822 current_config->alt_name.link_m, \ 12823 current_config->alt_name.link_n, \ 12824 pipe_config->name.tu, \ 12825 pipe_config->name.gmch_m, \ 12826 pipe_config->name.gmch_n, \ 12827 pipe_config->name.link_m, \ 12828 pipe_config->name.link_n); \ 12829 ret = false; \ 12830 } 12831 12832 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 12833 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12834 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ 12835 "(expected %i, found %i)\n", \ 12836 current_config->name & (mask), \ 12837 pipe_config->name & (mask)); \ 12838 ret 
= false; \ 12839 } 12840 12841 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 12842 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12843 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12844 "(expected %i, found %i)\n", \ 12845 current_config->name, \ 12846 pipe_config->name); \ 12847 ret = false; \ 12848 } 12849 12850 #define PIPE_CONF_QUIRK(quirk) \ 12851 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12852 12853 PIPE_CONF_CHECK_I(cpu_transcoder); 12854 12855 PIPE_CONF_CHECK_I(has_pch_encoder); 12856 PIPE_CONF_CHECK_I(fdi_lanes); 12857 PIPE_CONF_CHECK_M_N(fdi_m_n); 12858 12859 PIPE_CONF_CHECK_I(lane_count); 12860 PIPE_CONF_CHECK_X(lane_lat_optim_mask); 12861 12862 if (INTEL_INFO(dev)->gen < 8) { 12863 PIPE_CONF_CHECK_M_N(dp_m_n); 12864 12865 if (current_config->has_drrs) 12866 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12867 } else 12868 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12869 12870 PIPE_CONF_CHECK_X(output_types); 12871 12872 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12873 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12874 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12875 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12876 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12877 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12878 12879 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12880 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12881 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12882 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12883 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12884 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12885 12886 PIPE_CONF_CHECK_I(pixel_multiplier); 12887 PIPE_CONF_CHECK_I(has_hdmi_sink); 12888 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 12889 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 12890 PIPE_CONF_CHECK_I(limited_color_range); 12891 PIPE_CONF_CHECK_I(has_infoframe); 12892 12893 
PIPE_CONF_CHECK_I(has_audio); 12894 12895 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12896 DRM_MODE_FLAG_INTERLACE); 12897 12898 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 12899 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12900 DRM_MODE_FLAG_PHSYNC); 12901 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12902 DRM_MODE_FLAG_NHSYNC); 12903 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12904 DRM_MODE_FLAG_PVSYNC); 12905 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12906 DRM_MODE_FLAG_NVSYNC); 12907 } 12908 12909 PIPE_CONF_CHECK_X(gmch_pfit.control); 12910 /* pfit ratios are autocomputed by the hw on gen4+ */ 12911 if (INTEL_INFO(dev)->gen < 4) 12912 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios); 12913 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 12914 12915 if (!adjust) { 12916 PIPE_CONF_CHECK_I(pipe_src_w); 12917 PIPE_CONF_CHECK_I(pipe_src_h); 12918 12919 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12920 if (current_config->pch_pfit.enabled) { 12921 PIPE_CONF_CHECK_X(pch_pfit.pos); 12922 PIPE_CONF_CHECK_X(pch_pfit.size); 12923 } 12924 12925 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12926 } 12927 12928 /* BDW+ don't expose a synchronous way to read the state */ 12929 if (IS_HASWELL(dev)) 12930 PIPE_CONF_CHECK_I(ips_enabled); 12931 12932 PIPE_CONF_CHECK_I(double_wide); 12933 12934 PIPE_CONF_CHECK_X(ddi_pll_sel); 12935 12936 PIPE_CONF_CHECK_P(shared_dpll); 12937 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12938 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12939 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12940 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12941 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12942 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12943 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12944 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12945 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12946 12947 PIPE_CONF_CHECK_X(dsi_pll.ctrl); 12948 PIPE_CONF_CHECK_X(dsi_pll.div); 12949 12950 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 12951 PIPE_CONF_CHECK_I(pipe_bpp); 12952 12953 
PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12954 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12955 12956 #undef PIPE_CONF_CHECK_X 12957 #undef PIPE_CONF_CHECK_I 12958 #undef PIPE_CONF_CHECK_P 12959 #undef PIPE_CONF_CHECK_FLAGS 12960 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12961 #undef PIPE_CONF_QUIRK 12962 #undef INTEL_ERR_OR_DBG_KMS 12963 12964 return ret; 12965 } 12966 12967 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv, 12968 const struct intel_crtc_state *pipe_config) 12969 { 12970 if (pipe_config->has_pch_encoder) { 12971 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config), 12972 &pipe_config->fdi_m_n); 12973 int dotclock = pipe_config->base.adjusted_mode.crtc_clock; 12974 12975 /* 12976 * FDI already provided one idea for the dotclock. 12977 * Yell if the encoder disagrees. 12978 */ 12979 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock), 12980 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12981 fdi_dotclock, dotclock); 12982 } 12983 } 12984 12985 static void verify_wm_state(struct drm_crtc *crtc, 12986 struct drm_crtc_state *new_state) 12987 { 12988 struct drm_device *dev = crtc->dev; 12989 struct drm_i915_private *dev_priv = to_i915(dev); 12990 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12991 struct skl_ddb_entry *hw_entry, *sw_entry; 12992 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12993 const enum i915_pipe pipe = intel_crtc->pipe; 12994 int plane; 12995 12996 if (INTEL_INFO(dev)->gen < 9 || !new_state->active) 12997 return; 12998 12999 skl_ddb_get_hw_state(dev_priv, &hw_ddb); 13000 sw_ddb = &dev_priv->wm.skl_hw.ddb; 13001 13002 /* planes */ 13003 for_each_plane(dev_priv, pipe, plane) { 13004 hw_entry = &hw_ddb.plane[pipe][plane]; 13005 sw_entry = &sw_ddb->plane[pipe][plane]; 13006 13007 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 13008 continue; 13009 13010 DRM_ERROR("mismatch in DDB state pipe %c plane %d " 13011 "(expected (%u,%u), found 
/*
 * Verify that every connector currently routed to @crtc has consistent
 * atomic vs. legacy encoder pointers (pass crtc == NULL to check the
 * unbound connectors).
 */
static void
verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_connector *connector;

	drm_for_each_connector(connector, dev) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* Only look at connectors assigned to the crtc under test. */
		if (state->crtc != crtc)
			continue;

		intel_connector_verify_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}

/*
 * For every encoder, check that the software notion of "enabled" (some
 * connector's atomic state points at it) agrees with the encoder's crtc
 * link, and that a detached encoder is really off in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			/* The connector and the encoder must agree on the crtc. */
			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* A detached encoder must read back as disabled. */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
/*
 * Read the pipe configuration back from hardware into the (recycled)
 * old_crtc_state storage and compare it against the committed software
 * state.  NOTE: this deliberately destroys @old_crtc_state's contents and
 * reuses its memory as the hw-readout buffer.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Free old_crtc_state's references, then reuse it for hw readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum i915_pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active) {
			/* Let the live encoder fill in its part of the readout. */
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	/* Full config comparison only makes sense for an active crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}

/*
 * Verify one shared DPLL's software tracking (on/active/crtc masks)
 * against its hardware state.  @crtc may be NULL, in which case only the
 * global mask consistency is checked.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on PLLs are exempt from the on/active cross-checks. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
		     "pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
pipe_name(drm_crtc_index(crtc))); 13242 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask, 13243 "pll enabled crtcs mismatch (found %x in enabled mask)\n", 13244 pipe_name(drm_crtc_index(crtc))); 13245 } 13246 } 13247 13248 static void 13249 intel_modeset_verify_crtc(struct drm_crtc *crtc, 13250 struct drm_crtc_state *old_state, 13251 struct drm_crtc_state *new_state) 13252 { 13253 if (!needs_modeset(new_state) && 13254 !to_intel_crtc_state(new_state)->update_pipe) 13255 return; 13256 13257 verify_wm_state(crtc, new_state); 13258 verify_connector_state(crtc->dev, crtc); 13259 verify_crtc_state(crtc, old_state, new_state); 13260 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state); 13261 } 13262 13263 static void 13264 verify_disabled_dpll_state(struct drm_device *dev) 13265 { 13266 struct drm_i915_private *dev_priv = to_i915(dev); 13267 int i; 13268 13269 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13270 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL); 13271 } 13272 13273 static void 13274 intel_modeset_verify_disabled(struct drm_device *dev) 13275 { 13276 verify_encoder_state(dev); 13277 verify_connector_state(dev, NULL); 13278 verify_disabled_dpll_state(dev); 13279 } 13280 13281 static void update_scanline_offset(struct intel_crtc *crtc) 13282 { 13283 struct drm_device *dev = crtc->base.dev; 13284 13285 /* 13286 * The scanline counter increments at the leading edge of hsync. 13287 * 13288 * On most platforms it starts counting from vtotal-1 on the 13289 * first active line. That means the scanline counter value is 13290 * always one less than what we would expect. Ie. just after 13291 * start of vblank, which also occurs at start of hsync (on the 13292 * last active line), the scanline counter will read vblank_start-1. 
/*
 * Recompute crtc->scanline_offset, the correction applied to the hw
 * scanline counter so software sees a consistent value across platforms.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/*
 * Drop the shared-DPLL reference of every crtc that is about to be
 * modeset, so the PLL can be reassigned during the compute phase.  Only
 * relevant on platforms with a crtc_compute_clock hook.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		/* The old PLL comes from the committed state, not the new one. */
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		/* Lazily fetch the dpll state the first time we need it. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Records the pipe to wait on in hsw_workaround_pipe of the affected crtc
 * state.  Returns 0 or a negative error from state acquisition.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* Second enabled crtc found; remember it and stop. */
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/*
 * Pull every currently-active pipe (and its connectors/planes) into the
 * atomic @state and mark it for a full modeset.  Used when a global
 * resource change (e.g. cdclk) forces all pipes to be reprogrammed.
 */
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	/* add all active pipes to the state */
	for_each_crtc(state->dev, crtc) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		/* Inactive pipes and ones already modesetting need no forcing. */
		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			break;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			break;
	}

	return ret;
}
/*
 * Global (cross-pipe) validation run when at least one crtc needs a full
 * modeset: digital port conflict check, active-crtc mask bookkeeping,
 * cdclk recomputation (forcing all pipes when cdclk/vco changes), shared
 * PLL release and, on HSW, the multi-pipe plane workaround.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	/* Fold this commit's enable/disable decisions into the crtc mask. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/* Changing cdclk or its vco forces every pipe into the modeset. */
		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else {
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
	}

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
 * The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static int calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* Is there platform-specific watermark information to calculate? */
	if (dev_priv->display.compute_global_watermarks)
		return dev_priv->display.compute_global_watermarks(state);

	return 0;
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * i915's atomic_check hook: runs the helper modeset check, recomputes pipe
 * config for crtcs needing a modeset (possibly downgrading to a fastset when
 * the new config matches the current one), then runs the plane checks and
 * the global watermark calculation.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		/* Disabling a crtc is always a full modeset. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/*
		 * If the computed config matches the current one, downgrade
		 * the full modeset to a fastset (update_pipe).
		 */
		if (i915.fastboot &&
		    intel_pipe_config_compare(dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else {
		intel_state->cdclk = dev_priv->atomic_cdclk_freq;
	}

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	return calc_watermark_data(state);
}

/*
 * Prepare the commit: wait for pending page flips on the affected crtcs,
 * pin the new framebuffers under struct_mutex, and - for blocking commits -
 * wait for outstanding rendering on the new framebuffers.
 *
 * Returns 0 on success, negative errno on failure (planes cleaned up).
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool nonblock)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Cursor updates must not stall on flips. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle if too much unpin work has piled up. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* Blocking commits wait for rendering here; nonblocking ones defer. */
	if (!ret && !nonblock) {
		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  true, NULL, NULL);
			if (ret) {
				/* Any hang should be swallowed by the wait */
				WARN_ON(ret == -EIO);
				mutex_lock(&dev->struct_mutex);
				drm_atomic_helper_cleanup_planes(dev, state);
				mutex_unlock(&dev->struct_mutex);
				break;
			}
		}
	}

	return ret;
}

/*
 * Return the current vblank count for @crtc, falling back to the software
 * accurate count when the hardware has no usable counter.
 */
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	if (!dev->max_vblank_count)
		return drm_accurate_vblank_count(&crtc->base);

	return dev->driver->get_vblank_counter(dev, crtc->pipe);
}

/*
 * Wait for one vblank to pass on every pipe in @crtc_mask. A 50ms timeout
 * per pipe guards against a stuck/missing vblank interrupt.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum i915_pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	/* First pass: grab vblank references and sample the current counts. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* Drop the pipe from the mask so we don't wait on it. */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	/* Second pass: wait until each count advances, then drop the ref. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(crtc);
	}
}

/* Does finishing this crtc update require waiting for a vblank first? */
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
	/* fb updated, need to unpin old fb */
	if (crtc_state->fb_changed)
		return true;

	/* wm changes, need vblank before final wm's */
	if (crtc_state->update_wm_post)
		return true;

	/*
	 * cxsr is re-enabled after vblank.
	 * This is already handled by crtc_state->update_wm_post,
	 * but added for clarity.
	 */
	if (crtc_state->disable_cxsr)
		return true;

	return false;
}

/*
 * Enable (on a modeset) or update a single crtc and commit its planes,
 * accumulating pipes that need a vblank wait into @crtc_vblank_mask.
 */
static void intel_update_crtc(struct drm_crtc *crtc,
			      struct drm_atomic_state *state,
			      struct drm_crtc_state *old_crtc_state,
			      unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc->state);
	bool modeset = needs_modeset(crtc->state);

	if (modeset) {
		update_scanline_offset(intel_crtc);
		dev_priv->display.crtc_enable(crtc);
	} else {
		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
	}

	if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
		intel_fbc_enable(
		    intel_crtc, pipe_config,
		    to_intel_plane_state(crtc->primary->state));
	}

	drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

	if (needs_vblank_wait(pipe_config))
		*crtc_vblank_mask |= drm_crtc_mask(crtc);
}

/* Default update_crtcs hook: update all active crtcs in state order. */
static void intel_update_crtcs(struct drm_atomic_state *state,
			       unsigned int *crtc_vblank_mask)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state
			     *old_crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		if (!crtc->state->active)
			continue;

		intel_update_crtc(crtc, state, old_crtc_state,
				  crtc_vblank_mask);
	}
}

/*
 * SKL+ update_crtcs hook: orders crtc updates so that DDB (display buffer)
 * allocations never overlap between steps, waiting for vblanks in between
 * where required.
 */
static void skl_update_crtcs(struct drm_atomic_state *state,
			     unsigned int *crtc_vblank_mask)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	struct skl_ddb_allocation *new_ddb = &intel_state->wm_results.ddb;
	struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
	unsigned int updated = 0;
	bool progress;
	enum i915_pipe pipe;

	/*
	 * Whenever the number of active pipes changes, we need to make sure we
	 * update the pipes in the right order so that their ddb allocations
	 * never overlap with each other in between CRTC updates. Otherwise
	 * we'll cause pipe underruns and other bad stuff.
	 */
	do {
		int i;
		progress = false;

		for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
			bool vbl_wait = false;
			unsigned int cmask = drm_crtc_mask(crtc);
			pipe = to_intel_crtc(crtc)->pipe;

			if (updated & cmask || !crtc->state->active)
				continue;
			/* Defer pipes whose new DDB still overlaps another's. */
			if (skl_ddb_allocation_overlaps(state, cur_ddb, new_ddb,
							pipe))
				continue;

			updated |= cmask;

			/*
			 * If this is an already active pipe, it's DDB changed,
			 * and this isn't the last pipe that needs updating
			 * then we need to wait for a vblank to pass for the
			 * new ddb allocation to take effect.
			 */
			if (!skl_ddb_allocation_equals(cur_ddb, new_ddb, pipe) &&
			    !crtc->state->active_changed &&
			    intel_state->wm_results.dirty_pipes != updated)
				vbl_wait = true;

			intel_update_crtc(crtc, state, old_crtc_state,
					  crtc_vblank_mask);

			if (vbl_wait)
				intel_wait_for_vblank(dev, pipe);

			progress = true;
		}
	} while (progress);
}

/*
 * The tail of an atomic commit: runs after check/prepare and state swap,
 * either directly (blocking) or from the commit worker (nonblocking).
 * Disables outgoing pipes, commits cdclk/SAGV changes, enables/updates the
 * remaining pipes, waits for vblanks and performs delayed cleanup.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i, ret;

	/* Wait for any rendering still outstanding on the new framebuffers. */
	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane->state);

		if (!intel_plane_state->wait_req)
			continue;

		ret = __i915_wait_request(intel_plane_state->wait_req,
					  true, NULL, NULL);
		/* EIO should be eaten, and we can't get interrupted in the
		 * worker, and blocking commits have waited already.
		 */
		WARN_ON(ret);
	}

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* Disable all pipes that are changing before touching global state. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		if (dev_priv->display.modeset_commit_cdclk &&
		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
			dev_priv->display.modeset_commit_cdclk(state);

		/*
		 * SKL workaround: bspec recommends we disable the SAGV when we
		 * have more than one pipe enabled
		 */
		if (!intel_can_enable_sagv(state))
			intel_disable_sagv(dev_priv);

		intel_modeset_verify_disabled(dev);
	}

	/* Complete the events for pipes that have now been disabled */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		bool modeset = needs_modeset(crtc->state);

		/* Complete events for now disabled pipes here. */
		if (modeset && !crtc->state->active && crtc->state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&dev->event_lock);

			crtc->state->event = NULL;
		}
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	dev_priv->display.update_crtcs(state, &crtc_vblank_mask);

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(crtc->state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_cstate);
	}

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
	}

	if (intel_state->modeset && intel_can_enable_sagv(state))
		intel_enable_sagv(dev_priv);

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
}

/* Worker callback for nonblocking commits; just runs the commit tail. */
static void intel_atomic_commit_work(struct work_struct *work)
{
	struct drm_atomic_state *state = container_of(work,
						      struct drm_atomic_state,
						      commit_work);
	intel_atomic_commit_tail(state);
}

/* Transfer frontbuffer tracking bits from the old to the new fb objects. */
static void intel_atomic_track_fbs(struct drm_atomic_state *state)
{
	struct drm_plane_state *old_plane_state;
	struct drm_plane *plane;
	struct drm_i915_gem_object *obj, *old_obj;
	struct intel_plane *intel_plane;
	int i;

	mutex_lock(&state->dev->struct_mutex);
	for_each_plane_in_state(state, plane, old_plane_state, i) {
		obj = intel_fb_obj(plane->state->fb);
		old_obj = intel_fb_obj(old_plane_state->fb);
		intel_plane = to_intel_plane(plane);

		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
	}
	mutex_unlock(&state->dev->struct_mutex);
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * nonblocking commits are only safe for pure plane updates. Everything else
 * should work though.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	/* Nonblocking modesets are not supported yet - see FIXME above. */
	if (intel_state->modeset && nonblock) {
		DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	ret = intel_atomic_prepare_commit(dev, state, nonblock);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* Point of no return: swap in the new state and commit it. */
	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	dev_priv->wm.skl_results = intel_state->wm_results;
	intel_shared_dpll_commit(state);
	intel_atomic_track_fbs(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		intel_atomic_commit_tail(state);

	return 0;
}

/*
 * Force a modeset on @crtc that re-applies its current mode, retrying on
 * modeset-lock deadlock. Best effort: failures other than -EDEADLK only
 * free the state.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/*
	 * On success the commit consumed the state; free it only on failure
	 * or when the crtc was inactive (the "goto out" path above).
	 */
	if (ret)
out:
		drm_atomic_state_free(state);
}

#undef for_each_intel_crtc_masked

/*
 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
 *        drm_atomic_helper_legacy_gamma_set() directly.
 */
static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
					 u16 *red, u16 *green, u16 *blue,
					 uint32_t size)
{
	struct drm_device *dev = crtc->dev;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_crtc_state *state;
	int ret;

	ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
	if (ret)
		return ret;

	/*
	 * Make sure we update the legacy properties so this works when
	 * atomic is not enabled.
	 */

	state = crtc->state;

	drm_object_property_set_value(&crtc->base,
				      config->degamma_lut_property,
				      (state->degamma_lut) ?
				      state->degamma_lut->base.id : 0);

	drm_object_property_set_value(&crtc->base,
				      config->ctm_property,
				      (state->ctm) ?
				      state->ctm->base.id : 0);

	drm_object_property_set_value(&crtc->base,
				      config->gamma_lut_property,
				      (state->gamma_lut) ?
				      state->gamma_lut->base.id : 0);

	return 0;
}

/* CRTC vtable shared by all i915 crtcs; mostly atomic helpers. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_atomic_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	struct reservation_object *resv;
	int ret = 0;

	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);
		if (ret) {
			/* GPU hangs should have been swallowed by the wait */
			WARN_ON(ret == -EIO);
			return ret;
		}
	}

	if (!obj)
		return 0;

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(resv, false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* I830 requires 16KiB-aligned physical cursor storage. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
	}

	if (ret == 0) {
		/* Remember the last write so the commit can wait on it. */
		struct intel_plane_state *plane_state =
			to_intel_plane_state(new_state);

		i915_gem_request_assign(&plane_state->wait_req,
					obj->last_write_req);
	}

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @old_state: the state from the previous modeset
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/* Physical-cursor objects were never pinned, so skip the unpin. */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
	    !INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state->rotation);

	i915_gem_request_assign(&old_intel_state->wait_req, NULL);
}

/*
 * Compute the maximum scaling factor (16.16 fixed point) a SKL+ plane
 * scaler supports for @crtc_state, limited by both hardware (just under 3x)
 * and the cdclk/crtc_clock ratio.
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}

/* Per-plane check hook for the primary plane. */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					     &state->dst, &state->clip,
					     state->base.rotation,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}

/*
 * Begin-commit hook: starts vblank evasion, then (for fastsets) applies
 * color management and pipe config updates that must land inside the
 * evasion window.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);
	enum i915_pipe pipe = intel_crtc->pipe;

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets have already programmed everything in crtc_enable. */
	if (modeset)
		return;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_GEN(dev_priv) >= 9) {
		skl_detach_scalers(intel_crtc);

		I915_WRITE(PIPE_WM_LINETIME(pipe),
			   dev_priv->wm.skl_hw.wm_linetime[pipe]);
	}
}

/* Finish-commit hook: ends the vblank evasion critical section. */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc, NULL);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	if (!plane)
		return;

	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}

/* Plane vtable shared by all i915 planes; mostly atomic helpers. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate, configure and register the primary plane for @pipe, selecting
 * the platform-specific format list and update/disable hooks.
 *
 * Returns the new plane, or NULL on allocation/registration failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary)
		goto fail;

	state = intel_create_plane_state(&primary->base);
	if (!state)
		goto fail;
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* Pre-gen4 FBC platforms use the opposite plane for each pipe. */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* Plane naming differs per generation for debugfs consistency. */
	if (INTEL_INFO(dev)->gen >= 9)
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;

fail:
	kfree(state);
	kfree(primary);

	return NULL;
}

/* Create the device-wide rotation property (once) and attach it to @plane. */
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
	if (!dev->mode_config.rotation_property) {
		unsigned long
flags = DRM_ROTATE_0 | 14537 DRM_ROTATE_180; 14538 14539 if (INTEL_INFO(dev)->gen >= 9) 14540 flags |= DRM_ROTATE_90 | DRM_ROTATE_270; 14541 14542 dev->mode_config.rotation_property = 14543 drm_mode_create_rotation_property(dev, flags); 14544 } 14545 if (dev->mode_config.rotation_property) 14546 drm_object_attach_property(&plane->base.base, 14547 dev->mode_config.rotation_property, 14548 plane->base.state->rotation); 14549 } 14550 14551 static int 14552 intel_check_cursor_plane(struct drm_plane *plane, 14553 struct intel_crtc_state *crtc_state, 14554 struct intel_plane_state *state) 14555 { 14556 struct drm_crtc *crtc = crtc_state->base.crtc; 14557 struct drm_framebuffer *fb = state->base.fb; 14558 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 14559 enum i915_pipe pipe = to_intel_plane(plane)->pipe; 14560 unsigned stride; 14561 int ret; 14562 14563 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, 14564 &state->dst, &state->clip, 14565 state->base.rotation, 14566 DRM_PLANE_HELPER_NO_SCALING, 14567 DRM_PLANE_HELPER_NO_SCALING, 14568 true, true, &state->visible); 14569 if (ret) 14570 return ret; 14571 14572 /* if we want to turn off the cursor ignore width and height */ 14573 if (!obj) 14574 return 0; 14575 14576 /* Check for which cursor types we support */ 14577 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) { 14578 DRM_DEBUG("Cursor dimension %dx%d not supported\n", 14579 state->base.crtc_w, state->base.crtc_h); 14580 return -EINVAL; 14581 } 14582 14583 stride = roundup_pow_of_two(state->base.crtc_w) * 4; 14584 if (obj->base.size < stride * state->base.crtc_h) { 14585 DRM_DEBUG_KMS("buffer is too small\n"); 14586 return -ENOMEM; 14587 } 14588 14589 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) { 14590 DRM_DEBUG_KMS("cursor cannot be tiled\n"); 14591 return -EINVAL; 14592 } 14593 14594 /* 14595 * There's something wrong with the cursor on CHV pipe C. 
14596 * If it straddles the left edge of the screen then 14597 * moving it away from the edge or disabling it often 14598 * results in a pipe underrun, and often that can lead to 14599 * dead pipe (constant underrun reported, and it scans 14600 * out just a solid color). To recover from that, the 14601 * display power well must be turned off and on again. 14602 * Refuse the put the cursor into that compromised position. 14603 */ 14604 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C && 14605 state->visible && state->base.crtc_x < 0) { 14606 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); 14607 return -EINVAL; 14608 } 14609 14610 return 0; 14611 } 14612 14613 static void 14614 intel_disable_cursor_plane(struct drm_plane *plane, 14615 struct drm_crtc *crtc) 14616 { 14617 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 14618 14619 intel_crtc->cursor_addr = 0; 14620 intel_crtc_update_cursor(crtc, NULL); 14621 } 14622 14623 static void 14624 intel_update_cursor_plane(struct drm_plane *plane, 14625 const struct intel_crtc_state *crtc_state, 14626 const struct intel_plane_state *state) 14627 { 14628 struct drm_crtc *crtc = crtc_state->base.crtc; 14629 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 14630 struct drm_device *dev = plane->dev; 14631 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); 14632 uint32_t addr; 14633 14634 if (!obj) 14635 addr = 0; 14636 else if (!INTEL_INFO(dev)->cursor_needs_physical) 14637 addr = i915_gem_obj_ggtt_offset(obj); 14638 else 14639 addr = obj->phys_handle->busaddr; 14640 14641 intel_crtc->cursor_addr = addr; 14642 intel_crtc_update_cursor(crtc, state); 14643 } 14644 14645 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 14646 int pipe) 14647 { 14648 struct intel_plane *cursor = NULL; 14649 struct intel_plane_state *state = NULL; 14650 int ret; 14651 14652 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL); 14653 if (!cursor) 14654 goto fail; 14655 14656 state = 
intel_create_plane_state(&cursor->base); 14657 if (!state) 14658 goto fail; 14659 cursor->base.state = &state->base; 14660 14661 cursor->can_scale = false; 14662 cursor->max_downscale = 1; 14663 cursor->pipe = pipe; 14664 cursor->plane = pipe; 14665 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe); 14666 cursor->check_plane = intel_check_cursor_plane; 14667 cursor->update_plane = intel_update_cursor_plane; 14668 cursor->disable_plane = intel_disable_cursor_plane; 14669 14670 ret = drm_universal_plane_init(dev, &cursor->base, 0, 14671 &intel_plane_funcs, 14672 intel_cursor_formats, 14673 ARRAY_SIZE(intel_cursor_formats), 14674 DRM_PLANE_TYPE_CURSOR, 14675 "cursor %c", pipe_name(pipe)); 14676 if (ret) 14677 goto fail; 14678 14679 if (INTEL_INFO(dev)->gen >= 4) { 14680 if (!dev->mode_config.rotation_property) 14681 dev->mode_config.rotation_property = 14682 drm_mode_create_rotation_property(dev, 14683 DRM_ROTATE_0 | 14684 DRM_ROTATE_180); 14685 if (dev->mode_config.rotation_property) 14686 drm_object_attach_property(&cursor->base.base, 14687 dev->mode_config.rotation_property, 14688 state->base.rotation); 14689 } 14690 14691 if (INTEL_INFO(dev)->gen >=9) 14692 state->scaler_id = -1; 14693 14694 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); 14695 14696 return &cursor->base; 14697 14698 fail: 14699 kfree(state); 14700 kfree(cursor); 14701 14702 return NULL; 14703 } 14704 14705 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 14706 struct intel_crtc_state *crtc_state) 14707 { 14708 int i; 14709 struct intel_scaler *intel_scaler; 14710 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; 14711 14712 for (i = 0; i < intel_crtc->num_scalers; i++) { 14713 intel_scaler = &scaler_state->scalers[i]; 14714 intel_scaler->in_use = 0; 14715 intel_scaler->mode = PS_SCALER_MODE_DYN; 14716 } 14717 14718 scaler_state->scaler_id = -1; 14719 } 14720 14721 static void intel_crtc_init(struct drm_device *dev, 
			    int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* pipe C only has one scaler on SKL-class hardware */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 = "unknown", forces a full reprogram on first cursor update */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	/* Lots of code relies on drm_crtc index == pipe; verify it holds. */
	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	/* intel_plane_destroy() tolerates NULL, so no ordering care needed */
	intel_plane_destroy(primary);
	intel_plane_destroy(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}

/*
 * Return the pipe the connector's encoder is currently driving, or
 * INVALID_PIPE when the connector is not attached to a CRTC. Caller must
 * hold the connection_mutex.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/*
 * DRM_I915_GET_PIPE_FROM_CRTC_ID ioctl: translate a CRTC object id into
 * the hardware pipe number. Returns -ENOENT for an unknown CRTC id.
 */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
	if (!drmmode_crtc)
		return -ENOENT;

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

static
int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	/* Build the possible_clones bitmask: bit N set means encoder N can
	 * be active on the same CRTC at the same time as @encoder. */
	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* Whether an eDP panel is wired to port A (mobile ILK/SNB-class only). */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Whether the platform/SKU has an analog VGA (CRT) connector at all. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * Probe and register all display outputs (encoders/connectors) for the
 * platform. The probing order is significant: LVDS must be initialized
 * before eDP, and per-platform strap registers plus VBT data decide which
 * ports actually exist.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute their clone/crtc masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/* Framebuffer destroy hook: drops the GEM object reference taken at init. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/*
 * Create a GEM handle for the framebuffer's backing object; refused for
 * userptr objects since handing them to other processes would be unsafe.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct
drm_device *dev = fb->dev; 15110 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 15111 struct drm_i915_gem_object *obj = intel_fb->obj; 15112 15113 mutex_lock(&dev->struct_mutex); 15114 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB); 15115 mutex_unlock(&dev->struct_mutex); 15116 15117 return 0; 15118 } 15119 15120 static const struct drm_framebuffer_funcs intel_fb_funcs = { 15121 .destroy = intel_user_framebuffer_destroy, 15122 .create_handle = intel_user_framebuffer_create_handle, 15123 .dirty = intel_user_framebuffer_dirty, 15124 }; 15125 15126 static 15127 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier, 15128 uint32_t pixel_format) 15129 { 15130 u32 gen = INTEL_INFO(dev)->gen; 15131 15132 if (gen >= 9) { 15133 int cpp = drm_format_plane_cpp(pixel_format, 0); 15134 15135 /* "The stride in bytes must not exceed the of the size of 8K 15136 * pixels and 32K bytes." 15137 */ 15138 return min(8192 * cpp, 32768); 15139 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 15140 return 32*1024; 15141 } else if (gen >= 4) { 15142 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 15143 return 16*1024; 15144 else 15145 return 32*1024; 15146 } else if (gen >= 3) { 15147 if (fb_modifier == I915_FORMAT_MOD_X_TILED) 15148 return 8*1024; 15149 else 15150 return 16*1024; 15151 } else { 15152 /* XXX DSPC is limited to 4k tiled */ 15153 return 8*1024; 15154 } 15155 } 15156 15157 static int intel_framebuffer_init(struct drm_device *dev, 15158 struct intel_framebuffer *intel_fb, 15159 struct drm_mode_fb_cmd2 *mode_cmd, 15160 struct drm_i915_gem_object *obj) 15161 { 15162 struct drm_i915_private *dev_priv = to_i915(dev); 15163 unsigned int aligned_height; 15164 int ret; 15165 u32 pitch_limit, stride_alignment; 15166 15167 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 15168 15169 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) { 15170 /* Enforce that fb modifier and tiling mode match, but only for 15171 * X-tiled. This is needed for FBC. 
*/ 15172 if (!!(obj->tiling_mode == I915_TILING_X) != 15173 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) { 15174 DRM_DEBUG("tiling_mode doesn't match fb modifier\n"); 15175 return -EINVAL; 15176 } 15177 } else { 15178 if (obj->tiling_mode == I915_TILING_X) 15179 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED; 15180 else if (obj->tiling_mode == I915_TILING_Y) { 15181 DRM_DEBUG("No Y tiling for legacy addfb\n"); 15182 return -EINVAL; 15183 } 15184 } 15185 15186 /* Passed in modifier sanity checking. */ 15187 switch (mode_cmd->modifier[0]) { 15188 case I915_FORMAT_MOD_Y_TILED: 15189 case I915_FORMAT_MOD_Yf_TILED: 15190 if (INTEL_INFO(dev)->gen < 9) { 15191 DRM_DEBUG("Unsupported tiling 0x%llx!\n", 15192 mode_cmd->modifier[0]); 15193 return -EINVAL; 15194 } 15195 case DRM_FORMAT_MOD_NONE: 15196 case I915_FORMAT_MOD_X_TILED: 15197 break; 15198 default: 15199 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n", 15200 mode_cmd->modifier[0]); 15201 return -EINVAL; 15202 } 15203 15204 stride_alignment = intel_fb_stride_alignment(dev_priv, 15205 mode_cmd->modifier[0], 15206 mode_cmd->pixel_format); 15207 if (mode_cmd->pitches[0] & (stride_alignment - 1)) { 15208 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n", 15209 mode_cmd->pitches[0], stride_alignment); 15210 return -EINVAL; 15211 } 15212 15213 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0], 15214 mode_cmd->pixel_format); 15215 if (mode_cmd->pitches[0] > pitch_limit) { 15216 DRM_DEBUG("%s pitch (%u) must be at less than %d\n", 15217 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ? 15218 "tiled" : "linear", 15219 mode_cmd->pitches[0], pitch_limit); 15220 return -EINVAL; 15221 } 15222 15223 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED && 15224 mode_cmd->pitches[0] != obj->stride) { 15225 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n", 15226 mode_cmd->pitches[0], obj->stride); 15227 return -EINVAL; 15228 } 15229 15230 /* Reject formats not supported by any plane early. 
*/ 15231 switch (mode_cmd->pixel_format) { 15232 case DRM_FORMAT_C8: 15233 case DRM_FORMAT_RGB565: 15234 case DRM_FORMAT_XRGB8888: 15235 case DRM_FORMAT_ARGB8888: 15236 break; 15237 case DRM_FORMAT_XRGB1555: 15238 if (INTEL_INFO(dev)->gen > 3) { 15239 DRM_DEBUG("unsupported pixel format: %s\n", 15240 drm_get_format_name(mode_cmd->pixel_format)); 15241 return -EINVAL; 15242 } 15243 break; 15244 case DRM_FORMAT_ABGR8888: 15245 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && 15246 INTEL_INFO(dev)->gen < 9) { 15247 DRM_DEBUG("unsupported pixel format: %s\n", 15248 drm_get_format_name(mode_cmd->pixel_format)); 15249 return -EINVAL; 15250 } 15251 break; 15252 case DRM_FORMAT_XBGR8888: 15253 case DRM_FORMAT_XRGB2101010: 15254 case DRM_FORMAT_XBGR2101010: 15255 if (INTEL_INFO(dev)->gen < 4) { 15256 DRM_DEBUG("unsupported pixel format: %s\n", 15257 drm_get_format_name(mode_cmd->pixel_format)); 15258 return -EINVAL; 15259 } 15260 break; 15261 case DRM_FORMAT_ABGR2101010: 15262 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) { 15263 DRM_DEBUG("unsupported pixel format: %s\n", 15264 drm_get_format_name(mode_cmd->pixel_format)); 15265 return -EINVAL; 15266 } 15267 break; 15268 case DRM_FORMAT_YUYV: 15269 case DRM_FORMAT_UYVY: 15270 case DRM_FORMAT_YVYU: 15271 case DRM_FORMAT_VYUY: 15272 if (INTEL_INFO(dev)->gen < 5) { 15273 DRM_DEBUG("unsupported pixel format: %s\n", 15274 drm_get_format_name(mode_cmd->pixel_format)); 15275 return -EINVAL; 15276 } 15277 break; 15278 default: 15279 DRM_DEBUG("unsupported pixel format: %s\n", 15280 drm_get_format_name(mode_cmd->pixel_format)); 15281 return -EINVAL; 15282 } 15283 15284 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */ 15285 if (mode_cmd->offsets[0] != 0) 15286 return -EINVAL; 15287 15288 aligned_height = intel_fb_align_height(dev, mode_cmd->height, 15289 mode_cmd->pixel_format, 15290 mode_cmd->modifier[0]); 15291 /* FIXME drm helper for size checks (especially planar formats)? 
 */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	intel_fill_fb_info(dev_priv, &intel_fb->base);

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	/* Released in intel_user_framebuffer_destroy(). */
	intel_fb->obj->framebuffer_references++;

	return 0;
}

/*
 * mode_config.fb_create hook: look up the backing GEM object from the
 * userspace handle and wrap it in an intel_framebuffer. On failure the
 * lookup reference is dropped and an ERR_PTR is returned.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
	/* NOTE: base is the first member, so this is equivalent to !obj */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}

#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No-op stub when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config =
haswell_get_pipe_config; 15354 dev_priv->display.get_initial_plane_config = 15355 skylake_get_initial_plane_config; 15356 dev_priv->display.crtc_compute_clock = 15357 haswell_crtc_compute_clock; 15358 dev_priv->display.crtc_enable = haswell_crtc_enable; 15359 dev_priv->display.crtc_disable = haswell_crtc_disable; 15360 } else if (HAS_DDI(dev_priv)) { 15361 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 15362 dev_priv->display.get_initial_plane_config = 15363 ironlake_get_initial_plane_config; 15364 dev_priv->display.crtc_compute_clock = 15365 haswell_crtc_compute_clock; 15366 dev_priv->display.crtc_enable = haswell_crtc_enable; 15367 dev_priv->display.crtc_disable = haswell_crtc_disable; 15368 } else if (HAS_PCH_SPLIT(dev_priv)) { 15369 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 15370 dev_priv->display.get_initial_plane_config = 15371 ironlake_get_initial_plane_config; 15372 dev_priv->display.crtc_compute_clock = 15373 ironlake_crtc_compute_clock; 15374 dev_priv->display.crtc_enable = ironlake_crtc_enable; 15375 dev_priv->display.crtc_disable = ironlake_crtc_disable; 15376 } else if (IS_CHERRYVIEW(dev_priv)) { 15377 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15378 dev_priv->display.get_initial_plane_config = 15379 i9xx_get_initial_plane_config; 15380 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock; 15381 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15382 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15383 } else if (IS_VALLEYVIEW(dev_priv)) { 15384 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15385 dev_priv->display.get_initial_plane_config = 15386 i9xx_get_initial_plane_config; 15387 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock; 15388 dev_priv->display.crtc_enable = valleyview_crtc_enable; 15389 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15390 } else if (IS_G4X(dev_priv)) { 15391 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15392 
dev_priv->display.get_initial_plane_config = 15393 i9xx_get_initial_plane_config; 15394 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock; 15395 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15396 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15397 } else if (IS_PINEVIEW(dev_priv)) { 15398 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15399 dev_priv->display.get_initial_plane_config = 15400 i9xx_get_initial_plane_config; 15401 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock; 15402 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15403 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15404 } else if (!IS_GEN2(dev_priv)) { 15405 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15406 dev_priv->display.get_initial_plane_config = 15407 i9xx_get_initial_plane_config; 15408 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 15409 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15410 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15411 } else { 15412 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 15413 dev_priv->display.get_initial_plane_config = 15414 i9xx_get_initial_plane_config; 15415 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock; 15416 dev_priv->display.crtc_enable = i9xx_crtc_enable; 15417 dev_priv->display.crtc_disable = i9xx_crtc_disable; 15418 } 15419 15420 /* Returns the core display clock speed */ 15421 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 15422 dev_priv->display.get_display_clock_speed = 15423 skylake_get_display_clock_speed; 15424 else if (IS_BROXTON(dev_priv)) 15425 dev_priv->display.get_display_clock_speed = 15426 broxton_get_display_clock_speed; 15427 else if (IS_BROADWELL(dev_priv)) 15428 dev_priv->display.get_display_clock_speed = 15429 broadwell_get_display_clock_speed; 15430 else if (IS_HASWELL(dev_priv)) 15431 dev_priv->display.get_display_clock_speed = 15432 haswell_get_display_clock_speed; 15433 else if (IS_VALLEYVIEW(dev_priv) 
|| IS_CHERRYVIEW(dev_priv)) 15434 dev_priv->display.get_display_clock_speed = 15435 valleyview_get_display_clock_speed; 15436 else if (IS_GEN5(dev_priv)) 15437 dev_priv->display.get_display_clock_speed = 15438 ilk_get_display_clock_speed; 15439 else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) || 15440 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 15441 dev_priv->display.get_display_clock_speed = 15442 i945_get_display_clock_speed; 15443 else if (IS_GM45(dev_priv)) 15444 dev_priv->display.get_display_clock_speed = 15445 gm45_get_display_clock_speed; 15446 else if (IS_CRESTLINE(dev_priv)) 15447 dev_priv->display.get_display_clock_speed = 15448 i965gm_get_display_clock_speed; 15449 else if (IS_PINEVIEW(dev_priv)) 15450 dev_priv->display.get_display_clock_speed = 15451 pnv_get_display_clock_speed; 15452 else if (IS_G33(dev_priv) || IS_G4X(dev_priv)) 15453 dev_priv->display.get_display_clock_speed = 15454 g33_get_display_clock_speed; 15455 else if (IS_I915G(dev_priv)) 15456 dev_priv->display.get_display_clock_speed = 15457 i915_get_display_clock_speed; 15458 else if (IS_I945GM(dev_priv) || IS_845G(dev_priv)) 15459 dev_priv->display.get_display_clock_speed = 15460 i9xx_misc_get_display_clock_speed; 15461 else if (IS_I915GM(dev_priv)) 15462 dev_priv->display.get_display_clock_speed = 15463 i915gm_get_display_clock_speed; 15464 else if (IS_I865G(dev_priv)) 15465 dev_priv->display.get_display_clock_speed = 15466 i865_get_display_clock_speed; 15467 else if (IS_I85X(dev_priv)) 15468 dev_priv->display.get_display_clock_speed = 15469 i85x_get_display_clock_speed; 15470 else { /* 830 */ 15471 WARN(!IS_I830(dev_priv), "Unknown platform. 
Assuming 133 MHz CDCLK\n"); 15472 dev_priv->display.get_display_clock_speed = 15473 i830_get_display_clock_speed; 15474 } 15475 15476 if (IS_GEN5(dev_priv)) { 15477 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 15478 } else if (IS_GEN6(dev_priv)) { 15479 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 15480 } else if (IS_IVYBRIDGE(dev_priv)) { 15481 /* FIXME: detect B0+ stepping and use auto training */ 15482 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15483 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15484 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15485 } 15486 15487 if (IS_BROADWELL(dev_priv)) { 15488 dev_priv->display.modeset_commit_cdclk = 15489 broadwell_modeset_commit_cdclk; 15490 dev_priv->display.modeset_calc_cdclk = 15491 broadwell_modeset_calc_cdclk; 15492 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15493 dev_priv->display.modeset_commit_cdclk = 15494 valleyview_modeset_commit_cdclk; 15495 dev_priv->display.modeset_calc_cdclk = 15496 valleyview_modeset_calc_cdclk; 15497 } else if (IS_BROXTON(dev_priv)) { 15498 dev_priv->display.modeset_commit_cdclk = 15499 bxt_modeset_commit_cdclk; 15500 dev_priv->display.modeset_calc_cdclk = 15501 bxt_modeset_calc_cdclk; 15502 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 15503 dev_priv->display.modeset_commit_cdclk = 15504 skl_modeset_commit_cdclk; 15505 dev_priv->display.modeset_calc_cdclk = 15506 skl_modeset_calc_cdclk; 15507 } 15508 15509 if (dev_priv->info.gen >= 9) 15510 dev_priv->display.update_crtcs = skl_update_crtcs; 15511 else 15512 dev_priv->display.update_crtcs = intel_update_crtcs; 15513 15514 switch (INTEL_INFO(dev_priv)->gen) { 15515 case 2: 15516 dev_priv->display.queue_flip = intel_gen2_queue_flip; 15517 break; 15518 15519 case 3: 15520 dev_priv->display.queue_flip = intel_gen3_queue_flip; 15521 break; 15522 15523 case 4: 15524 case 5: 15525 dev_priv->display.queue_flip = intel_gen4_queue_flip; 15526 
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times.  This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* As quirk_pipea_force above, but for pipe B (see the 830 table entries). */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* A quirk entry matched against the PCI device and subsystem IDs. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

/*
 * Apply quirks for this device: first by PCI device/subsystem-ID match
 * against intel_quirks[], then by DMI board match against intel_dmi_quirks[].
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		/* PCI_ANY_ID in the table acts as a wildcard for the subsystem IDs */
		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set SR01 bit 5 (the VGA screen-disable bit) before killing the plane */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/*
 * Early display hw bring-up: refresh the cdclk bookkeeping, (re)apply clock
 * gating and re-enable GT powersave.  Called from init and resume paths.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev_priv);
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements.  This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform.  Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto fail;
	}

	/* Write calculated watermark values back */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(cs);
	}

	drm_atomic_state_free(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}

/*
 * One-time modeset initialization: set up the DRM mode_config limits and
 * funcs, apply quirks, create crtcs/sprites and outputs, read out and
 * sanitize the hw state the firmware left behind, and reserve any BIOS
 * framebuffers that are still being scanned out.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do on display-less hardware */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker.  Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions grow with the hardware generation */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top.  This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}

/*
 * Hack to light up pipe A: run one load-detect cycle on the CRT connector,
 * which brings the pipe up with a real mode/output configuration (see the
 * comment below).
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No analog connector — nothing we can use for load detection */
	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}

/*
 * Check the plane -> pipe routing read from hardware; returns false when the
 * *other* plane is enabled and selects this crtc's pipe.  Only meaningful on
 * platforms where the mapping is programmable (pre-gen4, see caller).
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val;

	/* With a single pipe there is nothing to cross-check */
	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

/* True iff at least one encoder is currently linked to this crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}

/* True iff at least one connector is currently linked to this encoder. */
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return true;

	return false;
}

/* Fix up inconsistent crtc hw state left behind by the BIOS or by resume. */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping.  */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ...  */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

/* Disable encoders whose hw state is inconsistent with the connectors/pipe. */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}

/*
 * Re-disable the VGA plane if something re-enabled it.  The caller must
 * guarantee the VGA power domain is powered (see i915_redisable_vga()).
 */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses.
	 */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}

/* Read the primary plane enable bit straight from the hardware. */
static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	plane_state->visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}

/*
 * Read the current hardware modeset state into our crtc, shared-dpll,
 * encoder and connector software state.  No fixing up happens here; that
 * is done afterwards by the sanitize helpers (see
 * intel_modeset_setup_hw_state()).
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		/* Start from a clean crtc state before reading from hw */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
				pixclk = ilk_pipe_pixel_rate(crtc_state);
			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		/* Rebuild the crtc mask from the crtcs actually using this pll */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			crtc->config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}

/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess.
*/ 16360 for_each_intel_encoder(dev, encoder) { 16361 intel_sanitize_encoder(encoder); 16362 } 16363 16364 for_each_pipe(dev_priv, pipe) { 16365 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 16366 intel_sanitize_crtc(crtc); 16367 intel_dump_pipe_config(crtc, crtc->config, 16368 "[setup_hw_state]"); 16369 } 16370 16371 intel_modeset_update_connector_atomic_state(dev); 16372 16373 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 16374 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 16375 16376 if (!pll->on || pll->active_mask) 16377 continue; 16378 16379 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 16380 16381 pll->funcs.disable(dev_priv, pll); 16382 pll->on = false; 16383 } 16384 16385 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 16386 vlv_wm_get_hw_state(dev); 16387 else if (IS_GEN9(dev)) 16388 skl_wm_get_hw_state(dev); 16389 else if (HAS_PCH_SPLIT(dev)) 16390 ilk_wm_get_hw_state(dev); 16391 16392 for_each_intel_crtc(dev, crtc) { 16393 unsigned long put_domains; 16394 16395 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config); 16396 if (WARN_ON(put_domains)) 16397 modeset_put_power_domains(dev_priv, put_domains); 16398 } 16399 intel_display_set_init_power(dev_priv, false); 16400 16401 intel_fbc_init_pipe_state(dev_priv); 16402 } 16403 16404 void intel_display_resume(struct drm_device *dev) 16405 { 16406 struct drm_i915_private *dev_priv = to_i915(dev); 16407 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 16408 struct drm_modeset_acquire_ctx ctx; 16409 int ret; 16410 16411 dev_priv->modeset_restore_state = NULL; 16412 if (state) 16413 state->acquire_ctx = &ctx; 16414 16415 /* 16416 * This is a cludge because with real atomic modeset mode_config.mutex 16417 * won't be taken. Unfortunately some probed state like 16418 * audio_codec_enable is still protected by mode_config.mutex, so lock 16419 * it here for now. 
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

	/* Take all modeset locks, backing off and retrying on deadlock. */
	while (1) {
		ret = drm_modeset_lock_all_ctx(dev, &ctx);
		if (ret != -EDEADLK)
			break;

		drm_modeset_backoff(&ctx);
	}

	if (!ret)
		ret = __intel_display_resume(dev, state);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}

/*
 * intel_modeset_gem_init - GEM-dependent modeset init
 * @dev: drm device
 *
 * Runs the modeset initialization that has to wait until GEM is up:
 * GT power-saving, hardware init, the overlay, and pinning/fencing of any
 * framebuffers taken over from the BIOS at startup.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/*
			 * Pinning failed: drop the boot fb from this plane
			 * entirely (fb pointer, crtc link, state fb and the
			 * crtc's plane mask) so we don't scan out an
			 * unpinned buffer.
			 */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}
}

/*
 * intel_connector_register - late connector registration
 * @connector: connector to register
 *
 * Registers the backlight device for this connector; returns 0 on success
 * or the error from intel_backlight_device_register().
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	int ret;

	ret = intel_backlight_device_register(intel_connector);
	if (ret)
		goto err;

	return 0;

err:
	return ret;
}

/*
 * intel_connector_unregister - undo intel_connector_register
 * @connector: connector to unregister
 *
 * Unregisters the backlight device and destroys the panel backlight.
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	intel_backlight_device_unregister(intel_connector);
	intel_panel_destroy_backlight(connector);
}

/* Tear down all modeset state; mirror of the init path, in reverse. */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning off connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev);
}

/* Link @encoder to @connector, both in our bookkeeping and in the DRM core. */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 *
 * Flips the VGA-disable bit in the GMCH control word of the bridge device.
 * Returns 0 on success (or if already in the requested state), -EIO if the
 * PCI config access fails.
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	/* The control word lives in a different config register from gen6 on. */
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Nothing to do if the decode state already matches the request. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

/* Snapshot of display registers, captured when a GPU error is recorded. */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false means the registers below were not read (domain off) */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/*
 * Capture the current display register state into a freshly allocated
 * intel_display_error_state; returns NULL if there are no pipes or the
 * allocation fails. Registers behind a powered-down domain are skipped.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	/* GFP_ATOMIC: presumably reachable from error/interrupt context —
	 * TODO confirm against callers. */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Only read registers whose power domain is actually up. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source =
			I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	/* NOTE(review): assumes num_pipes <= 3 so num_transcoders fits both
	 * the transcoders[] table above and error->transcoder[4] — confirm. */
	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

/* Shorthand for appending to the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * intel_display_print_error_state - dump a captured display error state
 * @m: error-state buffer to print into
 * @dev: drm device
 * @error: state captured by intel_display_capture_error_state(); may be NULL,
 *         in which case nothing is printed
 *
 * Pretty-prints the pipe, plane, cursor and transcoder register snapshots.
 * Entries whose power domain was off at capture time print their (zeroed)
 * values with "Power: off".
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}