1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_crtc_helper.h> 41 #include <drm/drm_plane_helper.h> 42 #include <drm/drm_rect.h> 43 44 /* Primary plane formats for gen <= 3 */ 45 static const uint32_t i8xx_primary_formats[] = { 46 DRM_FORMAT_C8, 47 DRM_FORMAT_RGB565, 48 DRM_FORMAT_XRGB1555, 49 DRM_FORMAT_XRGB8888, 50 }; 51 52 /* Primary plane formats for gen >= 4 */ 53 static const uint32_t i965_primary_formats[] = { 54 DRM_FORMAT_C8, 55 DRM_FORMAT_RGB565, 56 DRM_FORMAT_XRGB8888, 57 DRM_FORMAT_XBGR8888, 58 DRM_FORMAT_XRGB2101010, 59 DRM_FORMAT_XBGR2101010, 60 }; 61 62 static const uint32_t skl_primary_formats[] = { 63 DRM_FORMAT_C8, 64 DRM_FORMAT_RGB565, 65 DRM_FORMAT_XRGB8888, 66 DRM_FORMAT_XBGR8888, 67 DRM_FORMAT_ARGB8888, 68 DRM_FORMAT_ABGR8888, 69 DRM_FORMAT_XRGB2101010, 70 DRM_FORMAT_XBGR2101010, 71 DRM_FORMAT_YUYV, 72 DRM_FORMAT_YVYU, 73 DRM_FORMAT_UYVY, 74 DRM_FORMAT_VYUY, 75 }; 76 77 /* Cursor formats */ 78 static const uint32_t intel_cursor_formats[] = { 79 DRM_FORMAT_ARGB8888, 80 }; 81 82 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 83 struct intel_crtc_state *pipe_config); 84 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 85 struct intel_crtc_state *pipe_config); 86 87 static int intel_framebuffer_init(struct drm_device *dev, 88 struct intel_framebuffer *ifb, 89 struct drm_mode_fb_cmd2 *mode_cmd, 90 struct drm_i915_gem_object *obj); 91 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc); 92 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc); 93 static void intel_cpu_transcoder_set_m_n(struct intel_crtc 
*crtc, 94 struct intel_link_m_n *m_n, 95 struct intel_link_m_n *m2_n2); 96 static void ironlake_set_pipeconf(struct drm_crtc *crtc); 97 static void haswell_set_pipeconf(struct drm_crtc *crtc); 98 static void intel_set_pipe_csc(struct drm_crtc *crtc); 99 static void vlv_prepare_pll(struct intel_crtc *crtc, 100 const struct intel_crtc_state *pipe_config); 101 static void chv_prepare_pll(struct intel_crtc *crtc, 102 const struct intel_crtc_state *pipe_config); 103 static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 104 static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *); 105 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc, 106 struct intel_crtc_state *crtc_state); 107 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 108 int num_connectors); 109 static void skylake_pfit_enable(struct intel_crtc *crtc); 110 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force); 111 static void ironlake_pfit_enable(struct intel_crtc *crtc); 112 static void intel_modeset_setup_hw_state(struct drm_device *dev); 113 static void intel_pre_disable_primary(struct drm_crtc *crtc); 114 115 typedef struct { 116 int min, max; 117 } intel_range_t; 118 119 typedef struct { 120 int dot_limit; 121 int p2_slow, p2_fast; 122 } intel_p2_t; 123 124 typedef struct intel_limit intel_limit_t; 125 struct intel_limit { 126 intel_range_t dot, vco, n, m, m1, m2, p, p1; 127 intel_p2_t p2; 128 }; 129 130 /* returns HPLL frequency in kHz */ 131 static int valleyview_get_vco(struct drm_i915_private *dev_priv) 132 { 133 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 134 135 /* Obtain SKU information */ 136 mutex_lock(&dev_priv->sb_lock); 137 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 138 CCK_FUSE_HPLL_FREQ_MASK; 139 mutex_unlock(&dev_priv->sb_lock); 140 141 return vco_freq[hpll_freq] * 1000; 142 } 143 144 static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 
145 const char *name, u32 reg) 146 { 147 u32 val; 148 int divider; 149 150 if (dev_priv->hpll_freq == 0) 151 dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 152 153 mutex_lock(&dev_priv->sb_lock); 154 val = vlv_cck_read(dev_priv, reg); 155 mutex_unlock(&dev_priv->sb_lock); 156 157 divider = val & CCK_FREQUENCY_VALUES; 158 159 WARN((val & CCK_FREQUENCY_STATUS) != 160 (divider << CCK_FREQUENCY_STATUS_SHIFT), 161 "%s change in progress\n", name); 162 163 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 164 } 165 166 int 167 intel_pch_rawclk(struct drm_device *dev) 168 { 169 struct drm_i915_private *dev_priv = dev->dev_private; 170 171 WARN_ON(!HAS_PCH_SPLIT(dev)); 172 173 return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK; 174 } 175 176 /* hrawclock is 1/4 the FSB frequency */ 177 int intel_hrawclk(struct drm_device *dev) 178 { 179 struct drm_i915_private *dev_priv = dev->dev_private; 180 uint32_t clkcfg; 181 182 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */ 183 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 184 return 200; 185 186 clkcfg = I915_READ(CLKCFG); 187 switch (clkcfg & CLKCFG_FSB_MASK) { 188 case CLKCFG_FSB_400: 189 return 100; 190 case CLKCFG_FSB_533: 191 return 133; 192 case CLKCFG_FSB_667: 193 return 166; 194 case CLKCFG_FSB_800: 195 return 200; 196 case CLKCFG_FSB_1067: 197 return 266; 198 case CLKCFG_FSB_1333: 199 return 333; 200 /* these two are just a guess; one of them might be right */ 201 case CLKCFG_FSB_1600: 202 case CLKCFG_FSB_1600_ALT: 203 return 400; 204 default: 205 return 133; 206 } 207 } 208 209 static void intel_update_czclk(struct drm_i915_private *dev_priv) 210 { 211 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) 212 return; 213 214 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 215 CCK_CZ_CLOCK_CONTROL); 216 217 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 218 } 219 220 static inline u32 /* units of 100MHz */ 221 intel_fdi_link_freq(struct 
drm_device *dev) 222 { 223 if (IS_GEN5(dev)) { 224 struct drm_i915_private *dev_priv = dev->dev_private; 225 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; 226 } else 227 return 27; 228 } 229 230 static const intel_limit_t intel_limits_i8xx_dac = { 231 .dot = { .min = 25000, .max = 350000 }, 232 .vco = { .min = 908000, .max = 1512000 }, 233 .n = { .min = 2, .max = 16 }, 234 .m = { .min = 96, .max = 140 }, 235 .m1 = { .min = 18, .max = 26 }, 236 .m2 = { .min = 6, .max = 16 }, 237 .p = { .min = 4, .max = 128 }, 238 .p1 = { .min = 2, .max = 33 }, 239 .p2 = { .dot_limit = 165000, 240 .p2_slow = 4, .p2_fast = 2 }, 241 }; 242 243 static const intel_limit_t intel_limits_i8xx_dvo = { 244 .dot = { .min = 25000, .max = 350000 }, 245 .vco = { .min = 908000, .max = 1512000 }, 246 .n = { .min = 2, .max = 16 }, 247 .m = { .min = 96, .max = 140 }, 248 .m1 = { .min = 18, .max = 26 }, 249 .m2 = { .min = 6, .max = 16 }, 250 .p = { .min = 4, .max = 128 }, 251 .p1 = { .min = 2, .max = 33 }, 252 .p2 = { .dot_limit = 165000, 253 .p2_slow = 4, .p2_fast = 4 }, 254 }; 255 256 static const intel_limit_t intel_limits_i8xx_lvds = { 257 .dot = { .min = 25000, .max = 350000 }, 258 .vco = { .min = 908000, .max = 1512000 }, 259 .n = { .min = 2, .max = 16 }, 260 .m = { .min = 96, .max = 140 }, 261 .m1 = { .min = 18, .max = 26 }, 262 .m2 = { .min = 6, .max = 16 }, 263 .p = { .min = 4, .max = 128 }, 264 .p1 = { .min = 1, .max = 6 }, 265 .p2 = { .dot_limit = 165000, 266 .p2_slow = 14, .p2_fast = 7 }, 267 }; 268 269 static const intel_limit_t intel_limits_i9xx_sdvo = { 270 .dot = { .min = 20000, .max = 400000 }, 271 .vco = { .min = 1400000, .max = 2800000 }, 272 .n = { .min = 1, .max = 6 }, 273 .m = { .min = 70, .max = 120 }, 274 .m1 = { .min = 8, .max = 18 }, 275 .m2 = { .min = 3, .max = 7 }, 276 .p = { .min = 5, .max = 80 }, 277 .p1 = { .min = 1, .max = 8 }, 278 .p2 = { .dot_limit = 200000, 279 .p2_slow = 10, .p2_fast = 5 }, 280 }; 281 282 static const intel_limit_t 
intel_limits_i9xx_lvds = { 283 .dot = { .min = 20000, .max = 400000 }, 284 .vco = { .min = 1400000, .max = 2800000 }, 285 .n = { .min = 1, .max = 6 }, 286 .m = { .min = 70, .max = 120 }, 287 .m1 = { .min = 8, .max = 18 }, 288 .m2 = { .min = 3, .max = 7 }, 289 .p = { .min = 7, .max = 98 }, 290 .p1 = { .min = 1, .max = 8 }, 291 .p2 = { .dot_limit = 112000, 292 .p2_slow = 14, .p2_fast = 7 }, 293 }; 294 295 296 static const intel_limit_t intel_limits_g4x_sdvo = { 297 .dot = { .min = 25000, .max = 270000 }, 298 .vco = { .min = 1750000, .max = 3500000}, 299 .n = { .min = 1, .max = 4 }, 300 .m = { .min = 104, .max = 138 }, 301 .m1 = { .min = 17, .max = 23 }, 302 .m2 = { .min = 5, .max = 11 }, 303 .p = { .min = 10, .max = 30 }, 304 .p1 = { .min = 1, .max = 3}, 305 .p2 = { .dot_limit = 270000, 306 .p2_slow = 10, 307 .p2_fast = 10 308 }, 309 }; 310 311 static const intel_limit_t intel_limits_g4x_hdmi = { 312 .dot = { .min = 22000, .max = 400000 }, 313 .vco = { .min = 1750000, .max = 3500000}, 314 .n = { .min = 1, .max = 4 }, 315 .m = { .min = 104, .max = 138 }, 316 .m1 = { .min = 16, .max = 23 }, 317 .m2 = { .min = 5, .max = 11 }, 318 .p = { .min = 5, .max = 80 }, 319 .p1 = { .min = 1, .max = 8}, 320 .p2 = { .dot_limit = 165000, 321 .p2_slow = 10, .p2_fast = 5 }, 322 }; 323 324 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 325 .dot = { .min = 20000, .max = 115000 }, 326 .vco = { .min = 1750000, .max = 3500000 }, 327 .n = { .min = 1, .max = 3 }, 328 .m = { .min = 104, .max = 138 }, 329 .m1 = { .min = 17, .max = 23 }, 330 .m2 = { .min = 5, .max = 11 }, 331 .p = { .min = 28, .max = 112 }, 332 .p1 = { .min = 2, .max = 8 }, 333 .p2 = { .dot_limit = 0, 334 .p2_slow = 14, .p2_fast = 14 335 }, 336 }; 337 338 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 339 .dot = { .min = 80000, .max = 224000 }, 340 .vco = { .min = 1750000, .max = 3500000 }, 341 .n = { .min = 1, .max = 3 }, 342 .m = { .min = 104, .max = 138 }, 343 .m1 = { .min = 17, .max = 
23 }, 344 .m2 = { .min = 5, .max = 11 }, 345 .p = { .min = 14, .max = 42 }, 346 .p1 = { .min = 2, .max = 6 }, 347 .p2 = { .dot_limit = 0, 348 .p2_slow = 7, .p2_fast = 7 349 }, 350 }; 351 352 static const intel_limit_t intel_limits_pineview_sdvo = { 353 .dot = { .min = 20000, .max = 400000}, 354 .vco = { .min = 1700000, .max = 3500000 }, 355 /* Pineview's Ncounter is a ring counter */ 356 .n = { .min = 3, .max = 6 }, 357 .m = { .min = 2, .max = 256 }, 358 /* Pineview only has one combined m divider, which we treat as m2. */ 359 .m1 = { .min = 0, .max = 0 }, 360 .m2 = { .min = 0, .max = 254 }, 361 .p = { .min = 5, .max = 80 }, 362 .p1 = { .min = 1, .max = 8 }, 363 .p2 = { .dot_limit = 200000, 364 .p2_slow = 10, .p2_fast = 5 }, 365 }; 366 367 static const intel_limit_t intel_limits_pineview_lvds = { 368 .dot = { .min = 20000, .max = 400000 }, 369 .vco = { .min = 1700000, .max = 3500000 }, 370 .n = { .min = 3, .max = 6 }, 371 .m = { .min = 2, .max = 256 }, 372 .m1 = { .min = 0, .max = 0 }, 373 .m2 = { .min = 0, .max = 254 }, 374 .p = { .min = 7, .max = 112 }, 375 .p1 = { .min = 1, .max = 8 }, 376 .p2 = { .dot_limit = 112000, 377 .p2_slow = 14, .p2_fast = 14 }, 378 }; 379 380 /* Ironlake / Sandybridge 381 * 382 * We calculate clock using (register_value + 2) for N/M1/M2, so here 383 * the range value for them is (actual_value - 2). 
384 */ 385 static const intel_limit_t intel_limits_ironlake_dac = { 386 .dot = { .min = 25000, .max = 350000 }, 387 .vco = { .min = 1760000, .max = 3510000 }, 388 .n = { .min = 1, .max = 5 }, 389 .m = { .min = 79, .max = 127 }, 390 .m1 = { .min = 12, .max = 22 }, 391 .m2 = { .min = 5, .max = 9 }, 392 .p = { .min = 5, .max = 80 }, 393 .p1 = { .min = 1, .max = 8 }, 394 .p2 = { .dot_limit = 225000, 395 .p2_slow = 10, .p2_fast = 5 }, 396 }; 397 398 static const intel_limit_t intel_limits_ironlake_single_lvds = { 399 .dot = { .min = 25000, .max = 350000 }, 400 .vco = { .min = 1760000, .max = 3510000 }, 401 .n = { .min = 1, .max = 3 }, 402 .m = { .min = 79, .max = 118 }, 403 .m1 = { .min = 12, .max = 22 }, 404 .m2 = { .min = 5, .max = 9 }, 405 .p = { .min = 28, .max = 112 }, 406 .p1 = { .min = 2, .max = 8 }, 407 .p2 = { .dot_limit = 225000, 408 .p2_slow = 14, .p2_fast = 14 }, 409 }; 410 411 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 412 .dot = { .min = 25000, .max = 350000 }, 413 .vco = { .min = 1760000, .max = 3510000 }, 414 .n = { .min = 1, .max = 3 }, 415 .m = { .min = 79, .max = 127 }, 416 .m1 = { .min = 12, .max = 22 }, 417 .m2 = { .min = 5, .max = 9 }, 418 .p = { .min = 14, .max = 56 }, 419 .p1 = { .min = 2, .max = 8 }, 420 .p2 = { .dot_limit = 225000, 421 .p2_slow = 7, .p2_fast = 7 }, 422 }; 423 424 /* LVDS 100mhz refclk limits. 
*/ 425 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 426 .dot = { .min = 25000, .max = 350000 }, 427 .vco = { .min = 1760000, .max = 3510000 }, 428 .n = { .min = 1, .max = 2 }, 429 .m = { .min = 79, .max = 126 }, 430 .m1 = { .min = 12, .max = 22 }, 431 .m2 = { .min = 5, .max = 9 }, 432 .p = { .min = 28, .max = 112 }, 433 .p1 = { .min = 2, .max = 8 }, 434 .p2 = { .dot_limit = 225000, 435 .p2_slow = 14, .p2_fast = 14 }, 436 }; 437 438 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 439 .dot = { .min = 25000, .max = 350000 }, 440 .vco = { .min = 1760000, .max = 3510000 }, 441 .n = { .min = 1, .max = 3 }, 442 .m = { .min = 79, .max = 126 }, 443 .m1 = { .min = 12, .max = 22 }, 444 .m2 = { .min = 5, .max = 9 }, 445 .p = { .min = 14, .max = 42 }, 446 .p1 = { .min = 2, .max = 6 }, 447 .p2 = { .dot_limit = 225000, 448 .p2_slow = 7, .p2_fast = 7 }, 449 }; 450 451 static const intel_limit_t intel_limits_vlv = { 452 /* 453 * These are the data rate limits (measured in fast clocks) 454 * since those are the strictest limits we have. The fast 455 * clock and actual rate limits are more relaxed, so checking 456 * them would make no difference. 457 */ 458 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 459 .vco = { .min = 4000000, .max = 6000000 }, 460 .n = { .min = 1, .max = 7 }, 461 .m1 = { .min = 2, .max = 3 }, 462 .m2 = { .min = 11, .max = 156 }, 463 .p1 = { .min = 2, .max = 3 }, 464 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 465 }; 466 467 static const intel_limit_t intel_limits_chv = { 468 /* 469 * These are the data rate limits (measured in fast clocks) 470 * since those are the strictest limits we have. The fast 471 * clock and actual rate limits are more relaxed, so checking 472 * them would make no difference. 
473 */ 474 .dot = { .min = 25000 * 5, .max = 540000 * 5}, 475 .vco = { .min = 4800000, .max = 6480000 }, 476 .n = { .min = 1, .max = 1 }, 477 .m1 = { .min = 2, .max = 2 }, 478 .m2 = { .min = 24 << 22, .max = 175 << 22 }, 479 .p1 = { .min = 2, .max = 4 }, 480 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 481 }; 482 483 static const intel_limit_t intel_limits_bxt = { 484 /* FIXME: find real dot limits */ 485 .dot = { .min = 0, .max = INT_MAX }, 486 .vco = { .min = 4800000, .max = 6700000 }, 487 .n = { .min = 1, .max = 1 }, 488 .m1 = { .min = 2, .max = 2 }, 489 /* FIXME: find real m2 limits */ 490 .m2 = { .min = 2 << 22, .max = 255 << 22 }, 491 .p1 = { .min = 2, .max = 4 }, 492 .p2 = { .p2_slow = 1, .p2_fast = 20 }, 493 }; 494 495 static bool 496 needs_modeset(struct drm_crtc_state *state) 497 { 498 return drm_atomic_crtc_needs_modeset(state); 499 } 500 501 /** 502 * Returns whether any output on the specified pipe is of the specified type 503 */ 504 bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) 505 { 506 struct drm_device *dev = crtc->base.dev; 507 struct intel_encoder *encoder; 508 509 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 510 if (encoder->type == type) 511 return true; 512 513 return false; 514 } 515 516 /** 517 * Returns whether any output on the specified pipe will have the specified 518 * type after a staged modeset is complete, i.e., the same as 519 * intel_pipe_has_type() but looking at encoder->new_crtc instead of 520 * encoder->crtc. 
521 */ 522 static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, 523 int type) 524 { 525 struct drm_atomic_state *state = crtc_state->base.state; 526 struct drm_connector *connector; 527 struct drm_connector_state *connector_state; 528 struct intel_encoder *encoder; 529 int i, num_connectors = 0; 530 531 for_each_connector_in_state(state, connector, connector_state, i) { 532 if (connector_state->crtc != crtc_state->base.crtc) 533 continue; 534 535 num_connectors++; 536 537 encoder = to_intel_encoder(connector_state->best_encoder); 538 if (encoder->type == type) 539 return true; 540 } 541 542 WARN_ON(num_connectors == 0); 543 544 return false; 545 } 546 547 static const intel_limit_t * 548 intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) 549 { 550 struct drm_device *dev = crtc_state->base.crtc->dev; 551 const intel_limit_t *limit; 552 553 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 554 if (intel_is_dual_link_lvds(dev)) { 555 if (refclk == 100000) 556 limit = &intel_limits_ironlake_dual_lvds_100m; 557 else 558 limit = &intel_limits_ironlake_dual_lvds; 559 } else { 560 if (refclk == 100000) 561 limit = &intel_limits_ironlake_single_lvds_100m; 562 else 563 limit = &intel_limits_ironlake_single_lvds; 564 } 565 } else 566 limit = &intel_limits_ironlake_dac; 567 568 return limit; 569 } 570 571 static const intel_limit_t * 572 intel_g4x_limit(struct intel_crtc_state *crtc_state) 573 { 574 struct drm_device *dev = crtc_state->base.crtc->dev; 575 const intel_limit_t *limit; 576 577 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 578 if (intel_is_dual_link_lvds(dev)) 579 limit = &intel_limits_g4x_dual_channel_lvds; 580 else 581 limit = &intel_limits_g4x_single_channel_lvds; 582 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || 583 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 584 limit = &intel_limits_g4x_hdmi; 585 } else if 
(intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { 586 limit = &intel_limits_g4x_sdvo; 587 } else /* The option is for other outputs */ 588 limit = &intel_limits_i9xx_sdvo; 589 590 return limit; 591 } 592 593 static const intel_limit_t * 594 intel_limit(struct intel_crtc_state *crtc_state, int refclk) 595 { 596 struct drm_device *dev = crtc_state->base.crtc->dev; 597 const intel_limit_t *limit; 598 599 if (IS_BROXTON(dev)) 600 limit = &intel_limits_bxt; 601 else if (HAS_PCH_SPLIT(dev)) 602 limit = intel_ironlake_limit(crtc_state, refclk); 603 else if (IS_G4X(dev)) { 604 limit = intel_g4x_limit(crtc_state); 605 } else if (IS_PINEVIEW(dev)) { 606 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 607 limit = &intel_limits_pineview_lvds; 608 else 609 limit = &intel_limits_pineview_sdvo; 610 } else if (IS_CHERRYVIEW(dev)) { 611 limit = &intel_limits_chv; 612 } else if (IS_VALLEYVIEW(dev)) { 613 limit = &intel_limits_vlv; 614 } else if (!IS_GEN2(dev)) { 615 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 616 limit = &intel_limits_i9xx_lvds; 617 else 618 limit = &intel_limits_i9xx_sdvo; 619 } else { 620 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 621 limit = &intel_limits_i8xx_lvds; 622 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 623 limit = &intel_limits_i8xx_dvo; 624 else 625 limit = &intel_limits_i8xx_dac; 626 } 627 return limit; 628 } 629 630 /* 631 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 632 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 633 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic. 634 * The helpers' return value is the rate of the clock that is fed to the 635 * display engine's pipe which can be the above fast dot clock rate or a 636 * divided-down version of it. 
637 */ 638 /* m1 is reserved as 0 in Pineview, n is a ring counter */ 639 static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 640 { 641 clock->m = clock->m2 + 2; 642 clock->p = clock->p1 * clock->p2; 643 if (WARN_ON(clock->n == 0 || clock->p == 0)) 644 return 0; 645 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 646 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 647 648 return clock->dot; 649 } 650 651 static uint32_t i9xx_dpll_compute_m(struct dpll *dpll) 652 { 653 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 654 } 655 656 static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 657 { 658 clock->m = i9xx_dpll_compute_m(clock); 659 clock->p = clock->p1 * clock->p2; 660 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0)) 661 return 0; 662 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2); 663 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 664 665 return clock->dot; 666 } 667 668 static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 669 { 670 clock->m = clock->m1 * clock->m2; 671 clock->p = clock->p1 * clock->p2; 672 if (WARN_ON(clock->n == 0 || clock->p == 0)) 673 return 0; 674 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n); 675 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 676 677 return clock->dot / 5; 678 } 679 680 int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 681 { 682 clock->m = clock->m1 * clock->m2; 683 clock->p = clock->p1 * clock->p2; 684 if (WARN_ON(clock->n == 0 || clock->p == 0)) 685 return 0; 686 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m, 687 clock->n << 22); 688 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p); 689 690 return clock->dot / 5; 691 } 692 693 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 694 /** 695 * Returns whether the given set of divisors are valid for a given refclk with 696 * the given connectors. 
697 */ 698 699 static bool intel_PLL_is_valid(struct drm_device *dev, 700 const intel_limit_t *limit, 701 const intel_clock_t *clock) 702 { 703 if (clock->n < limit->n.min || limit->n.max < clock->n) 704 INTELPllInvalid("n out of range\n"); 705 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 706 INTELPllInvalid("p1 out of range\n"); 707 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) 708 INTELPllInvalid("m2 out of range\n"); 709 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) 710 INTELPllInvalid("m1 out of range\n"); 711 712 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && 713 !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) 714 if (clock->m1 <= clock->m2) 715 INTELPllInvalid("m1 <= m2\n"); 716 717 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) { 718 if (clock->p < limit->p.min || limit->p.max < clock->p) 719 INTELPllInvalid("p out of range\n"); 720 if (clock->m < limit->m.min || limit->m.max < clock->m) 721 INTELPllInvalid("m out of range\n"); 722 } 723 724 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) 725 INTELPllInvalid("vco out of range\n"); 726 /* XXX: We may need to be checking "Dot clock" depending on the multiplier, 727 * connector, etc., rather than just a single range. 728 */ 729 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) 730 INTELPllInvalid("dot out of range\n"); 731 732 return true; 733 } 734 735 static int 736 i9xx_select_p2_div(const intel_limit_t *limit, 737 const struct intel_crtc_state *crtc_state, 738 int target) 739 { 740 struct drm_device *dev = crtc_state->base.crtc->dev; 741 742 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 743 /* 744 * For LVDS just rely on its current settings for dual-channel. 745 * We haven't figured out how to reliably set up different 746 * single/dual channel state, if we even can. 
747 */ 748 if (intel_is_dual_link_lvds(dev)) 749 return limit->p2.p2_fast; 750 else 751 return limit->p2.p2_slow; 752 } else { 753 if (target < limit->p2.dot_limit) 754 return limit->p2.p2_slow; 755 else 756 return limit->p2.p2_fast; 757 } 758 } 759 760 static bool 761 i9xx_find_best_dpll(const intel_limit_t *limit, 762 struct intel_crtc_state *crtc_state, 763 int target, int refclk, intel_clock_t *match_clock, 764 intel_clock_t *best_clock) 765 { 766 struct drm_device *dev = crtc_state->base.crtc->dev; 767 intel_clock_t clock; 768 int err = target; 769 770 memset(best_clock, 0, sizeof(*best_clock)); 771 772 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 773 774 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 775 clock.m1++) { 776 for (clock.m2 = limit->m2.min; 777 clock.m2 <= limit->m2.max; clock.m2++) { 778 if (clock.m2 >= clock.m1) 779 break; 780 for (clock.n = limit->n.min; 781 clock.n <= limit->n.max; clock.n++) { 782 for (clock.p1 = limit->p1.min; 783 clock.p1 <= limit->p1.max; clock.p1++) { 784 int this_err; 785 786 i9xx_calc_dpll_params(refclk, &clock); 787 if (!intel_PLL_is_valid(dev, limit, 788 &clock)) 789 continue; 790 if (match_clock && 791 clock.p != match_clock->p) 792 continue; 793 794 this_err = abs(clock.dot - target); 795 if (this_err < err) { 796 *best_clock = clock; 797 err = this_err; 798 } 799 } 800 } 801 } 802 } 803 804 return (err != target); 805 } 806 807 static bool 808 pnv_find_best_dpll(const intel_limit_t *limit, 809 struct intel_crtc_state *crtc_state, 810 int target, int refclk, intel_clock_t *match_clock, 811 intel_clock_t *best_clock) 812 { 813 struct drm_device *dev = crtc_state->base.crtc->dev; 814 intel_clock_t clock; 815 int err = target; 816 817 memset(best_clock, 0, sizeof(*best_clock)); 818 819 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 820 821 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; 822 clock.m1++) { 823 for (clock.m2 = limit->m2.min; 824 clock.m2 <= limit->m2.max; 
clock.m2++) { 825 for (clock.n = limit->n.min; 826 clock.n <= limit->n.max; clock.n++) { 827 for (clock.p1 = limit->p1.min; 828 clock.p1 <= limit->p1.max; clock.p1++) { 829 int this_err; 830 831 pnv_calc_dpll_params(refclk, &clock); 832 if (!intel_PLL_is_valid(dev, limit, 833 &clock)) 834 continue; 835 if (match_clock && 836 clock.p != match_clock->p) 837 continue; 838 839 this_err = abs(clock.dot - target); 840 if (this_err < err) { 841 *best_clock = clock; 842 err = this_err; 843 } 844 } 845 } 846 } 847 } 848 849 return (err != target); 850 } 851 852 static bool 853 g4x_find_best_dpll(const intel_limit_t *limit, 854 struct intel_crtc_state *crtc_state, 855 int target, int refclk, intel_clock_t *match_clock, 856 intel_clock_t *best_clock) 857 { 858 struct drm_device *dev = crtc_state->base.crtc->dev; 859 intel_clock_t clock; 860 int max_n; 861 bool found = false; 862 /* approximately equals target * 0.00585 */ 863 int err_most = (target >> 8) + (target >> 9); 864 865 memset(best_clock, 0, sizeof(*best_clock)); 866 867 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target); 868 869 max_n = limit->n.max; 870 /* based on hardware requirement, prefer smaller n to precision */ 871 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 872 /* based on hardware requirement, prefere larger m1,m2 */ 873 for (clock.m1 = limit->m1.max; 874 clock.m1 >= limit->m1.min; clock.m1--) { 875 for (clock.m2 = limit->m2.max; 876 clock.m2 >= limit->m2.min; clock.m2--) { 877 for (clock.p1 = limit->p1.max; 878 clock.p1 >= limit->p1.min; clock.p1--) { 879 int this_err; 880 881 i9xx_calc_dpll_params(refclk, &clock); 882 if (!intel_PLL_is_valid(dev, limit, 883 &clock)) 884 continue; 885 886 this_err = abs(clock.dot - target); 887 if (this_err < err_most) { 888 *best_clock = clock; 889 err_most = this_err; 890 max_n = clock.n; 891 found = true; 892 } 893 } 894 } 895 } 896 } 897 return found; 898 } 899 900 /* 901 * Check if the calculated PLL configuration is more optimal compared to 
the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	/* error in parts-per-million of the requested dot clock */
	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	/* require a 10 ppm improvement before replacing the best clock */
	return *error_ppm + 10 < best_error_ppm;
}

/*
 * Exhaustively search the VLV divider space for the settings closest to
 * @target. Works on the 5x "fast clock"; candidates are ranked by
 * vlv_PLL_is_optimal().
 */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* derive m2 from the target instead of iterating it */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * CHV divider search. n and m1 are fixed per hardware doc; only p1/p2 are
 * iterated and m2 is derived (in 22.2 fixed point) from the target clock.
 */
static bool
chv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	intel_clock_t clock;
	uint64_t m2;
	int found = false; /* NOTE(review): the vlv variant uses bool here */

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on hardware doc, the n always set to 1, and m1 always
	 * set to 2.  If requires to support 200Mhz refclk, we need to
	 * revisit this because n may not 1 anymore.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* m2 carries 22 fractional bits on CHV */
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			/* reject values that would overflow the m2 field math */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

/* BXT reuses the CHV search with the refclk/limits for this crtc state. */
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = i9xx_get_refclk(crtc_state, 0);

	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

/* Map a hardware pipe to the CPU transcoder currently configured for it. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

/*
 * Returns true when the display scanline (DSL) register reads the same
 * value across a 5 ms sleep, i.e. the pipe has stopped scanning out.
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 has a narrower scanline field */
	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* sideband read must be serialized against other sideband users */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)

/* Return the shared DPLL assigned to @crtc, or NULL if none is assigned. */
struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	if (crtc->config->shared_dpll < 0)
		return NULL;

	return &dev_priv->shared_dplls[crtc->config->shared_dpll];
}

/* For ILK+ */
void assert_shared_dpll(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll,
			bool state)
{
	bool cur_state;
	struct intel_dpll_hw_state hw_state;

	if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
		return;

	cur_state = pll->get_hw_state(dev_priv, pll, &hw_state);
	I915_STATE_WARN(cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			pll->name, onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv->dev)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (INTEL_INFO(dev_priv->dev)->gen == 5)
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv->dev))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum i915_pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/*
 * Warn if the panel power sequencer registers for @pipe are locked while
 * the panel is powered on. The PP control register location differs per
 * platform generation.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum i915_pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum i915_pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	/* 845/865 have a single cursor tied to pipe A with its own enable bit */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
			"cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* only read the register if its power well is already up */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
				"plane %c assertion failure, should be disabled but not\n",
				plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum i915_pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
				"plane %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	/* the sprite register layout differs per generation */
	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
					"plane %d assertion failure, should be off on pipe %c but is still active\n",
					sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
					"sprite %c assertion failure, should be off on pipe %c but is still active\n",
					sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/* vblank_get succeeding means vblanks are (wrongly) still enabled */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum i915_pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

/*
 * Decode whether the DP port control value @val is driving @pipe,
 * accounting for the different pipe-select encodings (CPT transcoder
 * select vs. CHV vs. legacy bits 30:31).
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

/* Same idea as dp_pipe_enabled() for SDVO/HDMI port control values. */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

/* Same idea as dp_pipe_enabled() for the LVDS port control value. */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

/* Same idea as dp_pipe_enabled() for the analog (VGA/ADPA) port value. */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
			"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
			&& (val & DP_PIPEB_SELECT),
			"IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
			"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
			&& (val & SDVO_PIPE_B_SELECT),
			"IBX PCH hdmi port still using transcoder B\n");
}

/* Check every PCH output port (DP, VGA, LVDS, HDMI) is off for @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

/*
 * Enable the VLV DPLL for @crtc. The pipe must be disabled; the write
 * sequence (posting reads, udelays, lock poll, repeated writes) follows
 * the hardware requirements and must not be reordered.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/*
 * Enable the CHV DPLL for @crtc: first enable the 10-bit clock via the
 * DPIO sideband, then the PLL itself, then poll for lock.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}

/* Count CRTCs that are active and driving a DVO output. */
static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

/*
 * Enable a pre-ILK (gen <= 4) DPLL for @crtc, including the i830 DVO 2x
 * clock workaround and the documented VGA-mode / warmup write sequence.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

/* Disable the VLV DPLL for @pipe; the pipe must already be off. */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	val = DPLL_VGA_MODE_DIS;
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REF_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}

/* Disable the CHV DPLL for @pipe, then gate its 10-bit clock via DPIO. */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}

/*
 * Poll the per-port ready bits until they match @expected_mask, warning
 * on timeout. Ports B/C share DPLL(0); port D uses DPIO_PHY_STATUS and
 * port C's mask is shifted by 4 within the register.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}

/* Run the shared DPLL's mode_set hook once, before its first enable. */
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	WARN_ON(!pll->config.crtc_mask);
	if (pll->active == 0) {
		DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
		WARN_ON(pll->on);
		assert_shared_dpll_disabled(dev_priv, pll);

		pll->mode_set(dev_priv, pll);
	}
}

/**
 * intel_enable_shared_dpll - enable PCH PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to enable
 *
 * The PCH PLL needs to be enabled before the PCH transcoder, since it
 * drives the transcoder clock.
 */
static void intel_enable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (WARN_ON(pll == NULL))
		return;

	if (WARN_ON(pll->config.crtc_mask == 0))
		return;

	DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	/* refcounted: only the first user actually turns the PLL on */
	if (pll->active++) {
		WARN_ON(!pll->on);
		assert_shared_dpll_enabled(dev_priv, pll);
		return;
	}
	WARN_ON(pll->on);

	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	DRM_DEBUG_KMS("enabling %s\n", pll->name);
	pll->enable(dev_priv, pll);
	pll->on = true;
}

/* Drop @crtc's reference on its shared DPLL, disabling it on last use. */
static void intel_disable_shared_dpll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	/* PCH only available on ILK+ */
	if (INTEL_INFO(dev)->gen < 5)
		return;

	if (pll == NULL)
		return;

	if (WARN_ON(!(pll->config.crtc_mask & (1 << drm_crtc_index(&crtc->base)))))
		return;

	DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
		      pll->name, pll->active, pll->on,
		      crtc->base.base.id);

	if (WARN_ON(pll->active == 0)) {
		assert_shared_dpll_disabled(dev_priv, pll);
		return;
	}

	assert_shared_dpll_enabled(dev_priv, pll);
	WARN_ON(!pll->on);
	if (--pll->active)
		return;

	DRM_DEBUG_KMS("disabling %s\n", pll->name);
	pll->disable(dev_priv, pll);
	pll->on = false;

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}

/*
 * Enable the PCH transcoder for @pipe. Requires the shared DPLL and both
 * FDI directions to be up; copies BPC/interlace settings from PIPECONF.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev));

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv,
				   intel_crtc_to_shared_dpll(intel_crtc));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	/* NOTE(review): unbraced nested if/else below relies on C's
	 * dangling-else binding; the inner else pairs with the inner if. */
	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv->dev) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}

/* LPT variant: single PCH transcoder, always paired with transcoder A RX. */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* PCH only available on ILK+ */
	BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev));

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}

/*
 * Disable the PCH transcoder for @pipe. FDI and all PCH ports must be
 * off first; waits for the transcoder state bit to clear.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}

static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	/* NOTE(review): unbraced outer if/else; the inner if/else is the
	 * whole GMCH branch and the trailing else binds to the outer if. */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* already enabled: only legitimate under the force-pipe quirks */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
2167 */ 2168 static void intel_disable_pipe(struct intel_crtc *crtc) 2169 { 2170 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2171 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2172 enum i915_pipe pipe = crtc->pipe; 2173 i915_reg_t reg; 2174 u32 val; 2175 2176 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe)); 2177 2178 /* 2179 * Make sure planes won't keep trying to pump pixels to us, 2180 * or we might hang the display. 2181 */ 2182 assert_planes_disabled(dev_priv, pipe); 2183 assert_cursor_disabled(dev_priv, pipe); 2184 assert_sprites_disabled(dev_priv, pipe); 2185 2186 reg = PIPECONF(cpu_transcoder); 2187 val = I915_READ(reg); 2188 if ((val & PIPECONF_ENABLE) == 0) 2189 return; 2190 2191 /* 2192 * Double wide has implications for planes 2193 * so best keep it disabled when not needed. 2194 */ 2195 if (crtc->config->double_wide) 2196 val &= ~PIPECONF_DOUBLE_WIDE; 2197 2198 /* Don't disable pipe or pipe PLLs if needed */ 2199 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) && 2200 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 2201 val &= ~PIPECONF_ENABLE; 2202 2203 I915_WRITE(reg, val); 2204 if ((val & PIPECONF_ENABLE) == 0) 2205 intel_wait_for_pipe_off(crtc); 2206 } 2207 2208 static bool need_vtd_wa(struct drm_device *dev) 2209 { 2210 #ifdef CONFIG_INTEL_IOMMU 2211 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped) 2212 return true; 2213 #endif 2214 return false; 2215 } 2216 2217 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 2218 { 2219 return IS_GEN2(dev_priv) ? 
		2048 : 4096;
}

/*
 * Width of one tile row in bytes for the given framebuffer modifier;
 * for linear FBs this degenerates to the pixel size (cpp).
 */
static unsigned int intel_tile_width(const struct drm_i915_private *dev_priv,
				     uint64_t fb_modifier, unsigned int cpp)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		/* Some platforms use 128-byte Y-tiles even past gen2. */
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size. */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb_modifier);
		return cpp;
	}
}

/*
 * Height of one tile in rows; 1 for linear, otherwise tile_size /
 * tile_width (tiles are a fixed number of bytes).
 */
unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
			       uint64_t fb_modifier, unsigned int cpp)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE)
		return 1;
	else
		return intel_tile_size(dev_priv) /
			intel_tile_width(dev_priv, fb_modifier, cpp);
}

/* Round an FB height up to a whole number of tile rows. */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_modifier)
{
	unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
	unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);

	return ALIGN(height, tile_height);
}

/*
 * Fill @view for pinning @fb: the normal GGTT view unless the plane is
 * rotated 90/270, in which case the rotated-view geometry (per-plane page
 * counts, NV12 UV sub-plane included) is computed from the FB layout.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct drm_i915_private *dev_priv = to_i915(fb->dev);
	struct intel_rotation_info *info = &view->params.rotated;
	unsigned int tile_size, tile_width, tile_height, cpp;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->uv_offset = fb->offsets[1];
	info->fb_modifier = fb->modifier[0];

	tile_size = intel_tile_size(dev_priv);

	cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	tile_width = intel_tile_width(dev_priv, fb->modifier[0], cpp);
	tile_height = tile_size / tile_width;

	info->width_pages = DIV_ROUND_UP(fb->pitches[0], tile_width);
	info->height_pages = DIV_ROUND_UP(fb->height, tile_height);
	info->size = info->width_pages * info->height_pages * tile_size;

	if (info->pixel_format == DRM_FORMAT_NV12) {
		/* Chroma plane: half height, its own cpp/modifier. */
		cpp = drm_format_plane_cpp(fb->pixel_format, 1);
		tile_width = intel_tile_width(dev_priv, fb->modifier[1], cpp);
		tile_height = tile_size / tile_width;

		info->width_pages_uv = DIV_ROUND_UP(fb->pitches[1], tile_width);
		info->height_pages_uv = DIV_ROUND_UP(fb->height / 2, tile_height);
		info->size_uv = info->width_pages_uv * info->height_pages_uv * tile_size;
	}
}

/* GGTT alignment required for a linear scanout surface on this platform. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		return 0;
}

/* GGTT alignment required for a scanout surface with the given modifier. */
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}

/*
 * Pin @fb's backing object into the display-capable GGTT range (using the
 * view derived from @plane_state) and install a fence register for tiled
 * scanout.  Returns 0 on success or a negative error code; on failure all
 * intermediate state is unwound via the goto-cleanup labels below.
 * Caller must hold dev->struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (normal view only)
 * and unpin the object from the display plane.  Caller must hold
 * struct_mutex.
 */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, plane_state);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
 */
u32 intel_compute_tile_offset(struct drm_i915_private *dev_priv,
			      int *x, int *y,
			      uint64_t fb_modifier,
			      unsigned int cpp,
			      unsigned int pitch)
{
	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles;

		tile_size = intel_tile_size(dev_priv);
		tile_width = intel_tile_width(dev_priv, fb_modifier, cpp);
		tile_height = tile_size / tile_width;

		/* Whole tile rows above the target, remainder stays in *y. */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		/* Whole tiles left of the target, remainder stays in *x. */
		tiles = *x / (tile_width/cpp);
		*x %= tile_width/cpp;

		return tile_rows * pitch * tile_height + tiles * tile_size;
	} else {
		/* Linear: round down to the platform alignment, push the
		 * remainder back into the x/y coordinates. */
		unsigned int alignment = intel_linear_alignment(dev_priv) - 1;
		unsigned int offset;

		offset = *y * pitch + *x * cpp;
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
		return offset & ~alignment;
	}
}

/*
 * Translate a pre-SKL DISPPLANE pixel-format field back to a DRM fourcc;
 * unknown values fall into the default (XRGB8888) case.
 */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/*
 * Translate a SKL PLANE_CTL format (plus channel order and alpha flags)
 * back to a DRM fourcc; unknown values default to the 8888 family.
 */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/*
 * Wrap the firmware-programmed (BIOS) scanout memory in a stolen-memory GEM
 * object and an intel_framebuffer so the boot image survives driver
 * takeover.  Returns true on success, false if the preallocated region is
 * unusable (too large, zero-sized, or object creation failed).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

/*
 * Adopt the BIOS framebuffer for @intel_crtc's primary plane: wrap the
 * preallocated memory, or failing that share another CRTC's FB that scans
 * out the same address.  If neither works the primary plane is disabled so
 * it does not stay visible with a NULL FB.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-FB, unscaled src/dst rectangles (src is 16.16 fixed point). */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}

/* Program the gen2-gen4 (and VLV/CHV) primary plane registers for @fb. */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats were validated earlier; anything else is a bug. */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a tile-aligned surface base plus a
		 * (x, y) tile offset; split the linear offset accordingly. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(dev_priv, &x, &y,
						  fb->modifier[0], cpp,
						  fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	/* Control register first, then stride/surface/offsets. */
	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/* Turn off the gen2-gen4 primary plane and latch via a posting read. */
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}

/* Program the ILK-BDW primary plane registers for @fb. */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* Formats were validated earlier; anything else is a bug. */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(dev_priv, &x, &y,
					  fb->modifier[0], cpp,
					  fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate from the surface base; older gens need the
		 * offset of the last pixel instead. */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	/* Control register first, then stride/surface/offsets. */
	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/*
 * Unit by which PLANE_STRIDE is expressed: 64 bytes for linear FBs,
 * otherwise one tile width.
 */
u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
			      uint64_t fb_modifier, uint32_t pixel_format)
{
	if (fb_modifier == DRM_FORMAT_MOD_NONE) {
		return 64;
	} else {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		return intel_tile_width(dev_priv, fb_modifier, cpp);
	}
}

/*
 * GGTT offset of @obj's scanout view for @intel_plane; @plane selects the
 * color plane (1 = NV12 UV sub-plane in the rotated view).  Returns the
 * low 32 bits of the offset (scanout addresses must fit in 32 bits), or
 * (u32)-1 if no GGTT vma exists for the view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found!
 (view=%u)\n",
		 view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		/* UV sub-plane starts uv_start_page pages into the vma. */
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}

/* Disable SKL pipe scaler @id by zeroing its control/window registers. */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}

/*
 * This function detaches (aka. unbinds) unused scalers in hardware
 */
static void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use)
			skl_detach_scaler(intel_crtc, i);
	}
}

/* Map a DRM fourcc to SKL PLANE_CTL format/order/alpha bits (0 if unknown). */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

/* Map an FB modifier to SKL PLANE_CTL tiling bits (0 = linear/unknown). */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/* Map a DRM rotation bit to SKL PLANE_CTL rotation bits. */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case BIT(DRM_ROTATE_0):
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	case BIT(DRM_ROTATE_90):
		return PLANE_CTL_ROTATE_270;
	case BIT(DRM_ROTATE_180):
		return PLANE_CTL_ROTATE_180;
	case BIT(DRM_ROTATE_270):
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

/*
 * Program the SKL+ universal plane 0 (primary) registers for @fb,
 * including an attached pipe scaler when @plane_state carries a scaler id.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src rectangle is 16.16 fixed point, dst is integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* Scaler handles positioning; plane position stays 0. */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* PLANE_SURF last: writing it latches the whole update. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Turn off the SKL+ primary plane and latch via a posting read. */
static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(crtc)->pipe;

	I915_WRITE(PLANE_CTL(pipe, 0), 0);
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Assume fb object is
pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}

/* Run prepare/finish page-flip handling for every CRTC's plane. */
static void intel_complete_page_flips(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}
}

/*
 * Re-program every visible primary plane from its current atomic state,
 * taking the per-crtc modeset lock around each update. Used after a GPU
 * reset to make sure the correct fb is being scanned out (see
 * intel_finish_reset()).
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		drm_modeset_lock_crtc(crtc, &plane->base);
		plane_state = to_intel_plane_state(plane->base.state);

		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}

/*
 * Called before a GPU reset. On platforms where the reset also clobbers
 * the display (gen3/4 except g4x), suspend the display while holding
 * all modeset locks; the locks are released in intel_finish_reset().
 */
void intel_prepare_reset(struct drm_device *dev)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev);
}

/* Called after a GPU reset; counterpart of intel_prepare_reset(). */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev);

	intel_hpd_init(dev_priv);

	/* drops the modeset locks taken in intel_prepare_reset() */
	drm_modeset_unlock_all(dev);
}

/*
 * Check whether a page flip is still outstanding on @crtc. Reports
 * false when a GPU reset is in progress (or has completed since the
 * flip was queued), as the reset path completes such flips itself.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}

/* Apply pipe-size and panel-fitter updates that don't need a full modeset. */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	if (HAS_DDI(dev))
		intel_set_pipe_csc(&crtc->base);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set.
 In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}

/* Switch the FDI link from a training pattern to the normal (idle) pattern. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll FDI_RX_IIR for bit lock, i.e. training pattern 1 done */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write back the lock bit to clear the status */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll FDI_RX_IIR for symbol lock, i.e. training pattern 2 done */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing/pre-emphasis levels stepped through during SNB/IVB training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Train 1: step through the vswing/emphasis table until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Train 2: step through the vswing/emphasis table until symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* poll (with one re-read each iteration) for bit lock */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* poll (with one re-read each iteration) for symbol lock */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and the CPU FDI TX PLL for this crtc's pipe. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the BPC field from PIPECONF into FDI RX control */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Counterpart of ironlake_fdi_pll_enable(): turn both FDI PLLs back off. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}

/* Disable CPU FDI TX and PCH FDI RX, leaving training pattern 1 selected. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/* Report whether any CRTC still has framebuffer unpin work outstanding. */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate.
 Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		/* first crtc with a nonzero unpin_work_count decides the
		 * answer; remaining crtcs are not inspected */
		return true;
	}

	return false;
}

/*
 * Complete a finished page flip: send the queued vblank event (if any),
 * drop the vblank reference and schedule the unpin work. Callers in
 * this file invoke it under dev->event_lock, which protects the
 * unpin_work pointer cleared here.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/*
 * Wait (up to 60s) for any pending page flip on @crtc to complete.
 * A flip still stuck after the timeout is forcibly completed with a
 * warning. Returns 0 on success or a negative error when interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
			dev_priv->pending_flip_queue,
			!intel_crtc_has_pending_flip(crtc),
			60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}

/* Gate the pixel clock and disable the iCLKIP SSC modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* the clock must be gated and its modulator disabled before
	 * the divisors are reprogrammed */
	lpt_disable_iclkip(dev_priv);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq, clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}

/*
 * Copy the CPU transcoder's h/v timings over to the PCH transcoder so
 * both ends of the FDI link are programmed with the same mode.
 */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/*
 * Select whether the FDI B/C lanes are bifurcated (shared between pipes
 * B and C) via the SOUTH_CHICKEN1 chicken bit. No-op if already set.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	/* FDI RX on pipes B/C must be disabled before flipping the bit. */
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/*
 * Decide FDI B/C bifurcation based on which pipe is being enabled and
 * how many FDI lanes pipe B needs (more than 2 lanes means pipe B takes
 * the C lanes, so bifurcation must be off).
 */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is not a declared enum port value; the caller in
	 * ironlake_pch_enable() BUG()s on it via the switch default. Verify
	 * no DP/eDP encoder can be absent when that path runs. */
	return -1;
}

/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works.
	 */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence.
	 */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* FDI training is done; re-enable underrun reporting (suppressed
	 * above). */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

/*
 * LPT variant of the PCH enable sequence: program iCLKIP, copy timings
 * to the (single) PCH transcoder A, then enable it.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing.
*/ 4230 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 4231 4232 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4233 } 4234 4235 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, 4236 struct intel_crtc_state *crtc_state) 4237 { 4238 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 4239 struct intel_shared_dpll *pll; 4240 struct intel_shared_dpll_config *shared_dpll; 4241 enum intel_dpll_id i; 4242 int max = dev_priv->num_shared_dpll; 4243 4244 shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state); 4245 4246 if (HAS_PCH_IBX(dev_priv->dev)) { 4247 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 4248 i = (enum intel_dpll_id) crtc->pipe; 4249 pll = &dev_priv->shared_dplls[i]; 4250 4251 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4252 crtc->base.base.id, pll->name); 4253 4254 WARN_ON(shared_dpll[i].crtc_mask); 4255 4256 goto found; 4257 } 4258 4259 if (IS_BROXTON(dev_priv->dev)) { 4260 /* PLL is attached to port in bxt */ 4261 struct intel_encoder *encoder; 4262 struct intel_digital_port *intel_dig_port; 4263 4264 encoder = intel_ddi_get_crtc_new_encoder(crtc_state); 4265 if (WARN_ON(!encoder)) 4266 return NULL; 4267 4268 intel_dig_port = enc_to_dig_port(&encoder->base); 4269 /* 1:1 mapping between ports and PLLs */ 4270 i = (enum intel_dpll_id)intel_dig_port->port; 4271 pll = &dev_priv->shared_dplls[i]; 4272 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4273 crtc->base.base.id, pll->name); 4274 WARN_ON(shared_dpll[i].crtc_mask); 4275 4276 goto found; 4277 } else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv)) 4278 /* Do not consider SPLL */ 4279 max = 2; 4280 4281 for (i = 0; i < max; i++) { 4282 pll = &dev_priv->shared_dplls[i]; 4283 4284 /* Only want to check enabled timings first */ 4285 if (shared_dpll[i].crtc_mask == 0) 4286 continue; 4287 4288 if (memcmp(&crtc_state->dpll_hw_state, 4289 &shared_dpll[i].hw_state, 4290 sizeof(crtc_state->dpll_hw_state)) == 0) { 4291 
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", 4292 crtc->base.base.id, pll->name, 4293 shared_dpll[i].crtc_mask, 4294 pll->active); 4295 goto found; 4296 } 4297 } 4298 4299 /* Ok no matching timings, maybe there's a free one? */ 4300 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4301 pll = &dev_priv->shared_dplls[i]; 4302 if (shared_dpll[i].crtc_mask == 0) { 4303 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 4304 crtc->base.base.id, pll->name); 4305 goto found; 4306 } 4307 } 4308 4309 return NULL; 4310 4311 found: 4312 if (shared_dpll[i].crtc_mask == 0) 4313 shared_dpll[i].hw_state = 4314 crtc_state->dpll_hw_state; 4315 4316 crtc_state->shared_dpll = i; 4317 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 4318 pipe_name(crtc->pipe)); 4319 4320 shared_dpll[i].crtc_mask |= 1 << crtc->pipe; 4321 4322 return pll; 4323 } 4324 4325 static void intel_shared_dpll_commit(struct drm_atomic_state *state) 4326 { 4327 struct drm_i915_private *dev_priv = to_i915(state->dev); 4328 struct intel_shared_dpll_config *shared_dpll; 4329 struct intel_shared_dpll *pll; 4330 enum intel_dpll_id i; 4331 4332 if (!to_intel_atomic_state(state)->dpll_set) 4333 return; 4334 4335 shared_dpll = to_intel_atomic_state(state)->shared_dpll; 4336 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4337 pll = &dev_priv->shared_dplls[i]; 4338 pll->config = shared_dpll[i]; 4339 } 4340 } 4341 4342 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4343 { 4344 struct drm_i915_private *dev_priv = dev->dev_private; 4345 i915_reg_t dslreg = PIPEDSL(pipe); 4346 u32 temp; 4347 4348 temp = I915_READ(dslreg); 4349 udelay(500); 4350 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4351 if (wait_for(I915_READ(dslreg) != temp, 5)) 4352 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4353 } 4354 } 4355 4356 static int 4357 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, 4358 unsigned scaler_user, int *scaler_id, unsigned int 
		  rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* For 90/270 rotation the source is compared against the swapped
	 * destination dimensions. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}

/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's scaler state
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	/* Detach when the crtc is not active; pipe scaling never rotates. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}

/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @state: crtc's scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* No fb or an invisible plane means the scaler must be released. */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}

/* Detach all scalers on this crtc. */
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}

/*
 * Program the SKL+ panel fitter using the scaler reserved in the crtc's
 * scaler state (pch_pfit must have a scaler_id assigned already).
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}

/* Program the pre-SKL (ILK-BDW) panel fitter for this pipe. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

/*
 * Enable IPS (Intermediate Pixel Storage) for this crtc. On BDW the
 * enable goes through the pcode mailbox; on HSW via IPS_CTL directly.
 * No-op if the crtc's state doesn't have IPS enabled.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read.
		 */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

/*
 * Disable IPS for this crtc. Mirrors hsw_enable_ips(): pcode mailbox on
 * BDW, direct IPS_CTL write on HSW. Waits a vblank afterwards so the
 * plane may then be disabled safely.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette. */
	if (!crtc->state->active)
		return;

	if (HAS_GMCH_DISPLAY(dev_priv->dev)) {
		if (intel_crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	}

	/* Workaround : Do not read or write the pipe palette/gamma data while
	 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
	 */
	if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled &&
	    ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) ==
	     GAMMA_MODE_MODE_SPLIT)) {
		hsw_disable_ips(intel_crtc);
		reenable_ips = true;
	}

	/* Write all 256 R/G/B entries packed into one register each. */
	for (i = 0; i < 256; i++) {
		i915_reg_t palreg;

		if (HAS_GMCH_DISPLAY(dev))
			palreg = PALETTE(pipe, i);
		else
			palreg = LGC_PALETTE(pipe, i);

		I915_WRITE(palreg,
			   (intel_crtc->lut_r[i] << 16) |
			   (intel_crtc->lut_g[i] << 8) |
			   intel_crtc->lut_b[i]);
	}

	if (reenable_ips)
		hsw_enable_ips(intel_crtc);
}

/*
 * Switch off the legacy overlay, if present, before the crtc goes down.
 * Marks the operation non-interruptible while holding struct_mutex.
 */
static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
	if (intel_crtc->overlay) {
		struct drm_device *dev = intel_crtc->base.dev;
		struct drm_i915_private *dev_priv = dev->dev_private;

		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.interruptible = false;
		(void) intel_overlay_switch_off(intel_crtc->overlay);
		dev_priv->mm.interruptible = true;
		mutex_unlock(&dev->struct_mutex);
	}

	/* Let userspace switch the overlay on again. In most cases userspace
	 * has to recompute where to put it anyway.
	 */
}

/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS.  Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}

/**
 * intel_pre_disable_primary - Perform operations before disabling primary plane
 * @crtc: the CRTC whose primary plane is to be disabled
 *
 * Performs potentially sleeping operations that must be done before the
 * primary plane is disabled, such as updating FBC and IPS.  Note that this may
 * be called due to an explicit primary plane update, or due to an implicit
 * disable that is caused when a sprite plane completely hides the primary
 * plane.
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}

/*
 * Post-commit plane bookkeeping: flush frontbuffer bits, re-allow cxsr,
 * refresh watermarks/FBC and run the primary-plane post-enable hook,
 * then clear the accumulated atomic flags.
 */
static void intel_post_plane_update(struct intel_crtc *crtc)
{
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;

	intel_frontbuffer_flip(dev, atomic->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->wm_changed && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	if (atomic->update_fbc)
		intel_fbc_post_update(crtc);

	if (atomic->post_enable_primary)
		intel_post_enable_primary(&crtc->base);

	memset(atomic, 0, sizeof(*atomic));
}

/*
 * Pre-commit plane bookkeeping: FBC pre-update, primary-plane
 * pre-disable when it is about to vanish, cxsr disable and watermark
 * refresh where the new state requires it.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_atomic_commit *atomic = &crtc->atomic;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);

	if (atomic->update_fbc)
		intel_fbc_pre_update(crtc);

	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		/* Primary was visible and is now hidden (or a full modeset
		 * is happening): run the pre-disable fixups. */
		if (old_primary_state->visible &&
		    (modeset || !primary_state->visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr) {
		crtc->wm.cxsr_allowed = false;

		if (old_crtc_state->base.active)
			intel_set_memory_cxsr(dev_priv, false);
	}

	if (!needs_modeset(&pipe_config->base) && pipe_config->wm_changed)
		intel_update_watermarks(&crtc->base);
}

/* Disable every plane in @plane_mask and flip the frontbuffer state. */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}

/*
 * Full crtc enable sequence for ILK-IVB (pipe + optional PCH/FDI side).
 * Ordering of the register writes below is mandated by the modeset
 * sequence and must not be rearranged.
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/* IPS only exists on ULT machines and is tied to pipe A. */
static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
{
	return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
}

/*
 * Full crtc enable sequence for HSW/BDW/SKL+ (DDI-based platforms).
 * As with the ILK variant, the ordering below follows the documented
 * modeset sequence and must be preserved.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) {
		I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	haswell_set_pipeconf(crtc);

	intel_set_pipe_csc(crtc);

	intel_crtc->active = true;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_crtc_load_lut(crtc);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	if (intel_crtc->config->has_pch_encoder) {
		/* Two vblank waits before re-enabling underrun reporting. */
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}

/*
 * Disable the ILK-BDW panel fitter; with @force false it is only
 * touched when currently enabled in the crtc state.
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right.
	 */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}

/*
 * ILK/SNB/IVB pipe disable sequence: encoder disable hooks, vblank off,
 * pipe, panel fitter, FDI, encoder post_disable hooks, then PCH transcoder
 * and FDI PLL teardown. PCH-specific register cleanup (TRANS_DP_CTL,
 * PCH_DPLL_SEL) only applies to CPT-class PCHs.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_fdi_disable(crtc);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}

/*
 * HSW+ (DDI) pipe disable sequence — inverse order of haswell_crtc_enable:
 * encoder disable hooks, vblank off, pipe, MST VC payload, DDI transcoder,
 * scaler/pfit, pipe clock, encoder post_disable hooks, then LPT PCH
 * transcoder/iCLKIP/FDI teardown for PCH configs.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}

/* Program the GMCH panel fitter (gen2-4/VLV/CHV) from the pipe config. */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	/* Nothing to do when the pfit is not in use for this pipe. */
	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/* Map a DDI port to the power domain covering its lanes. */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Map a DDI port to the power domain covering its AUX channel. */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}

/*
 * Return the power domain an encoder's output port depends on, based on
 * the encoder type (digital ports resolve through their DDI port).
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Return the AUX-channel power domain for an encoder's port. */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}

/*
 * Compute the bitmask of power domains a CRTC state needs: pipe,
 * transcoder, panel fitter (when used or forced through), plus the port
 * domain of every encoder in the state's encoder_mask. Returns 0 for an
 * inactive CRTC.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	return mask;
}

/*
 * Grab references for domains newly required by crtc_state and record them
 * on the CRTC. Returns the mask of domains no longer needed, which the
 * caller must release later via modeset_put_power_domains().
 */
static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	/* get only the domains we are newly acquiring */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	return old_domains & ~new_domains;
}

/* Release the power-domain references in @domains. */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}

/*
 * Derive the maximum supported dot clock from the maximum CDCLK; older
 * platforms need a guardband (90%/95%), gen2/3 can double-pump.
 */
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (INTEL_INFO(dev_priv)->gen >= 9 ||
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq*95/100;
	else if (INTEL_INFO(dev_priv)->gen < 4)
		return 2*max_cdclk_freq*90/100;
	else
		return max_cdclk_freq*90/100;
}

/*
 * Determine the platform's maximum CDCLK (from fuses/straps where the
 * hardware reports it) and the derived maximum dot clock.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROADWELL(dev)) {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}

/*
 * Re-read the current CDCLK from hardware, cache it, and keep dependent
 * values (gmbus frequency on VLV/CHV, max CDCLK) in sync.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * Program the gmbus_freq based on the cdclk frequency.
		 * BSpec erroneously claims we should aim for 4MHz, but
		 * in fact 1MHz is the correct frequency.
5441 */ 5442 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5443 } 5444 5445 if (dev_priv->max_cdclk_freq == 0) 5446 intel_update_max_cdclk(dev); 5447 } 5448 5449 static void broxton_set_cdclk(struct drm_device *dev, int frequency) 5450 { 5451 struct drm_i915_private *dev_priv = dev->dev_private; 5452 uint32_t divider; 5453 uint32_t ratio; 5454 uint32_t current_freq; 5455 int ret; 5456 5457 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5458 switch (frequency) { 5459 case 144000: 5460 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5461 ratio = BXT_DE_PLL_RATIO(60); 5462 break; 5463 case 288000: 5464 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5465 ratio = BXT_DE_PLL_RATIO(60); 5466 break; 5467 case 384000: 5468 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5469 ratio = BXT_DE_PLL_RATIO(60); 5470 break; 5471 case 576000: 5472 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5473 ratio = BXT_DE_PLL_RATIO(60); 5474 break; 5475 case 624000: 5476 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5477 ratio = BXT_DE_PLL_RATIO(65); 5478 break; 5479 case 19200: 5480 /* 5481 * Bypass frequency with DE PLL disabled. Init ratio, divider 5482 * to suppress GCC warning. 
5483 */ 5484 ratio = 0; 5485 divider = 0; 5486 break; 5487 default: 5488 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5489 5490 return; 5491 } 5492 5493 mutex_lock(&dev_priv->rps.hw_lock); 5494 /* Inform power controller of upcoming frequency change */ 5495 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5496 0x80000000); 5497 mutex_unlock(&dev_priv->rps.hw_lock); 5498 5499 if (ret) { 5500 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5501 ret, frequency); 5502 return; 5503 } 5504 5505 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5506 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5507 current_freq = current_freq * 500 + 1000; 5508 5509 /* 5510 * DE PLL has to be disabled when 5511 * - setting to 19.2MHz (bypass, PLL isn't used) 5512 * - before setting to 624MHz (PLL needs toggling) 5513 * - before setting to any frequency from 624MHz (PLL needs toggling) 5514 */ 5515 if (frequency == 19200 || frequency == 624000 || 5516 current_freq == 624000) { 5517 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5518 /* Timeout 200us */ 5519 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5520 1)) 5521 DRM_ERROR("timout waiting for DE PLL unlock\n"); 5522 } 5523 5524 if (frequency != 19200) { 5525 uint32_t val; 5526 5527 val = I915_READ(BXT_DE_PLL_CTL); 5528 val &= ~BXT_DE_PLL_RATIO_MASK; 5529 val |= ratio; 5530 I915_WRITE(BXT_DE_PLL_CTL, val); 5531 5532 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); 5533 /* Timeout 200us */ 5534 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) 5535 DRM_ERROR("timeout waiting for DE PLL lock\n"); 5536 5537 val = I915_READ(CDCLK_CTL); 5538 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; 5539 val |= divider; 5540 /* 5541 * Disable SSA Precharge when CD clock frequency < 500 MHz, 5542 * enable otherwise. 
5543 */ 5544 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5545 if (frequency >= 500000) 5546 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5547 5548 val &= ~CDCLK_FREQ_DECIMAL_MASK; 5549 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5550 val |= (frequency - 1000) / 500; 5551 I915_WRITE(CDCLK_CTL, val); 5552 } 5553 5554 mutex_lock(&dev_priv->rps.hw_lock); 5555 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5556 DIV_ROUND_UP(frequency, 25000)); 5557 mutex_unlock(&dev_priv->rps.hw_lock); 5558 5559 if (ret) { 5560 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5561 ret, frequency); 5562 return; 5563 } 5564 5565 intel_update_cdclk(dev); 5566 } 5567 5568 void broxton_init_cdclk(struct drm_device *dev) 5569 { 5570 struct drm_i915_private *dev_priv = dev->dev_private; 5571 uint32_t val; 5572 5573 /* 5574 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5575 * or else the reset will hang because there is no PCH to respond. 5576 * Move the handshake programming to initialization sequence. 5577 * Previously was left up to BIOS. 5578 */ 5579 val = I915_READ(HSW_NDE_RSTWRN_OPT); 5580 val &= ~RESET_PCH_HANDSHAKE_ENABLE; 5581 I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 5582 5583 /* Enable PG1 for cdclk */ 5584 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5585 5586 /* check if cd clock is enabled */ 5587 if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) { 5588 DRM_DEBUG_KMS("Display already initialized\n"); 5589 return; 5590 } 5591 5592 /* 5593 * FIXME: 5594 * - The initial CDCLK needs to be read from VBT. 5595 * Need to make this change after VBT has changes for BXT. 
5596 * - check if setting the max (or any) cdclk freq is really necessary 5597 * here, it belongs to modeset time 5598 */ 5599 broxton_set_cdclk(dev, 624000); 5600 5601 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5602 POSTING_READ(DBUF_CTL); 5603 5604 udelay(10); 5605 5606 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5607 DRM_ERROR("DBuf power enable timeout!\n"); 5608 } 5609 5610 void broxton_uninit_cdclk(struct drm_device *dev) 5611 { 5612 struct drm_i915_private *dev_priv = dev->dev_private; 5613 5614 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5615 POSTING_READ(DBUF_CTL); 5616 5617 udelay(10); 5618 5619 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5620 DRM_ERROR("DBuf power disable timeout!\n"); 5621 5622 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5623 broxton_set_cdclk(dev, 19200); 5624 5625 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5626 } 5627 5628 static const struct skl_cdclk_entry { 5629 unsigned int freq; 5630 unsigned int vco; 5631 } skl_cdclk_frequencies[] = { 5632 { .freq = 308570, .vco = 8640 }, 5633 { .freq = 337500, .vco = 8100 }, 5634 { .freq = 432000, .vco = 8640 }, 5635 { .freq = 450000, .vco = 8100 }, 5636 { .freq = 540000, .vco = 8100 }, 5637 { .freq = 617140, .vco = 8640 }, 5638 { .freq = 675000, .vco = 8100 }, 5639 }; 5640 5641 static unsigned int skl_cdclk_decimal(unsigned int freq) 5642 { 5643 return (freq - 1000) / 500; 5644 } 5645 5646 static unsigned int skl_cdclk_get_vco(unsigned int freq) 5647 { 5648 unsigned int i; 5649 5650 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { 5651 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; 5652 5653 if (e->freq == freq) 5654 return e->vco; 5655 } 5656 5657 return 8100; 5658 } 5659 5660 static void 5661 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5662 { 5663 unsigned int min_freq; 5664 u32 val; 5665 5666 /* select the minimum CDCLK before enabling DPLL 0 */ 5667 
val = I915_READ(CDCLK_CTL); 5668 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; 5669 val |= CDCLK_FREQ_337_308; 5670 5671 if (required_vco == 8640) 5672 min_freq = 308570; 5673 else 5674 min_freq = 337500; 5675 5676 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); 5677 5678 I915_WRITE(CDCLK_CTL, val); 5679 POSTING_READ(CDCLK_CTL); 5680 5681 /* 5682 * We always enable DPLL0 with the lowest link rate possible, but still 5683 * taking into account the VCO required to operate the eDP panel at the 5684 * desired frequency. The usual DP link rates operate with a VCO of 5685 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5686 * The modeset code is responsible for the selection of the exact link 5687 * rate later on, with the constraint of choosing a frequency that 5688 * works with required_vco. 5689 */ 5690 val = I915_READ(DPLL_CTRL1); 5691 5692 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5693 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5694 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5695 if (required_vco == 8640) 5696 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5697 SKL_DPLL0); 5698 else 5699 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 5700 SKL_DPLL0); 5701 5702 I915_WRITE(DPLL_CTRL1, val); 5703 POSTING_READ(DPLL_CTRL1); 5704 5705 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5706 5707 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5708 DRM_ERROR("DPLL0 not locked\n"); 5709 } 5710 5711 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5712 { 5713 int ret; 5714 u32 val; 5715 5716 /* inform PCU we want to change CDCLK */ 5717 val = SKL_CDCLK_PREPARE_FOR_CHANGE; 5718 mutex_lock(&dev_priv->rps.hw_lock); 5719 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); 5720 mutex_unlock(&dev_priv->rps.hw_lock); 5721 5722 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); 5723 } 5724 5725 static bool skl_cdclk_wait_for_pcu_ready(struct 
						 drm_i915_private *dev_priv)
{
	unsigned int i;

	/* Poll the PCU up to 15 times, 10us apart. */
	for (i = 0; i < 15; i++) {
		if (skl_cdclk_pcu_ready(dev_priv))
			return true;
		udelay(10);
	}

	return false;
}

/*
 * Program the SKL CDCLK to @freq (kHz): wait for PCU readiness, write the
 * frequency-select and decimal fields, then ack the new operating point to
 * the PCU.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch (freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308570:
	case 337500:
	default:
		/* unknown frequencies deliberately fall back to 337.5/308.57 */
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}

/* Tear down the SKL display clock: DBUF power off, then DPLL0 off. */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/* disable DPLL0 */
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");
}

/*
 * Bring up the SKL display clock: enable DPLL0 if BIOS left it off, set
 * CDCLK to the BIOS-chosen boot frequency, then power up the DBUF.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int required_vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, required_vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

/*
 * Verify the pre-OS CDCLK programming and re-initialize it if anything is
 * off. Returns true if sanitization (re-init) was needed.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os initialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the voltage point from the Punit and wait for the ack. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}

/*
 * CHV CDCLK programming: unlike VLV, only the CCK divider needs to be
 * written to the Punit (specs notwithstanding).
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}

/* Pick the smallest VLV/CHV CDCLK bin that can feed @max_pixclk. */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ?
		    95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/* Pick the smallest BXT CDCLK bin that can feed @max_pixclk. */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	/*
	 * FIXME:
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	if (max_pixclk > 576000*9/10)
		return 624000;
	else if (max_pixclk > 384000*9/10)
		return 576000;
	else if (max_pixclk > 288000*9/10)
		return 384000;
	else if (max_pixclk > 144000*9/10)
		return 288000;
	else
		return 144000;
}

/* Compute the max pixel clock for new configuration.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum i915_pipe pipe;

	/* Start from the committed per-pipe values, then overlay this state. */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}

/*
 * Compute the VLV/CHV CDCLK for an atomic state; dev_cdclk drops to the
 * minimum when no CRTC is active.
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	if (max_pixclk < 0)
		return max_pixclk;

	intel_state->cdclk = intel_state->dev_cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);

	return 0;
}

/* BXT counterpart of valleyview_modeset_calc_cdclk(). */
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	if (max_pixclk < 0)
		return max_pixclk;

	intel_state->cdclk = intel_state->dev_cdclk =
		broxton_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); 6087 6088 return 0; 6089 } 6090 6091 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) 6092 { 6093 unsigned int credits, default_credits; 6094 6095 if (IS_CHERRYVIEW(dev_priv)) 6096 default_credits = PFI_CREDIT(12); 6097 else 6098 default_credits = PFI_CREDIT(8); 6099 6100 if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) { 6101 /* CHV suggested value is 31 or 63 */ 6102 if (IS_CHERRYVIEW(dev_priv)) 6103 credits = PFI_CREDIT_63; 6104 else 6105 credits = PFI_CREDIT(15); 6106 } else { 6107 credits = default_credits; 6108 } 6109 6110 /* 6111 * WA - write default credits before re-programming 6112 * FIXME: should we also set the resend bit here? 6113 */ 6114 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 6115 default_credits); 6116 6117 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 6118 credits | PFI_CREDIT_RESEND); 6119 6120 /* 6121 * FIXME is this guaranteed to clear 6122 * immediately or should we poll for it? 6123 */ 6124 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); 6125 } 6126 6127 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) 6128 { 6129 struct drm_device *dev = old_state->dev; 6130 struct drm_i915_private *dev_priv = dev->dev_private; 6131 struct intel_atomic_state *old_intel_state = 6132 to_intel_atomic_state(old_state); 6133 unsigned req_cdclk = old_intel_state->dev_cdclk; 6134 6135 /* 6136 * FIXME: We can end up here with all power domains off, yet 6137 * with a CDCLK frequency other than the minimum. To account 6138 * for this take the PIPE-A power domain, which covers the HW 6139 * blocks needed for the following programming. This can be 6140 * removed once it's guaranteed that we get here either with 6141 * the minimum CDCLK set, or the required power domains 6142 * enabled. 
6143 */ 6144 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); 6145 6146 if (IS_CHERRYVIEW(dev)) 6147 cherryview_set_cdclk(dev, req_cdclk); 6148 else 6149 valleyview_set_cdclk(dev, req_cdclk); 6150 6151 vlv_program_pfi_credits(dev_priv); 6152 6153 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); 6154 } 6155 6156 static void valleyview_crtc_enable(struct drm_crtc *crtc) 6157 { 6158 struct drm_device *dev = crtc->dev; 6159 struct drm_i915_private *dev_priv = to_i915(dev); 6160 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6161 struct intel_encoder *encoder; 6162 int pipe = intel_crtc->pipe; 6163 6164 if (WARN_ON(intel_crtc->active)) 6165 return; 6166 6167 if (intel_crtc->config->has_dp_encoder) 6168 intel_dp_set_m_n(intel_crtc, M1_N1); 6169 6170 intel_set_pipe_timings(intel_crtc); 6171 6172 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { 6173 struct drm_i915_private *dev_priv = dev->dev_private; 6174 6175 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6176 I915_WRITE(CHV_CANVAS(pipe), 0); 6177 } 6178 6179 i9xx_set_pipeconf(intel_crtc); 6180 6181 intel_crtc->active = true; 6182 6183 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6184 6185 for_each_encoder_on_crtc(dev, crtc, encoder) 6186 if (encoder->pre_pll_enable) 6187 encoder->pre_pll_enable(encoder); 6188 6189 if (!intel_crtc->config->has_dsi_encoder) { 6190 if (IS_CHERRYVIEW(dev)) { 6191 chv_prepare_pll(intel_crtc, intel_crtc->config); 6192 chv_enable_pll(intel_crtc, intel_crtc->config); 6193 } else { 6194 vlv_prepare_pll(intel_crtc, intel_crtc->config); 6195 vlv_enable_pll(intel_crtc, intel_crtc->config); 6196 } 6197 } 6198 6199 for_each_encoder_on_crtc(dev, crtc, encoder) 6200 if (encoder->pre_enable) 6201 encoder->pre_enable(encoder); 6202 6203 i9xx_pfit_enable(intel_crtc); 6204 6205 intel_crtc_load_lut(crtc); 6206 6207 intel_enable_pipe(intel_crtc); 6208 6209 assert_vblank_disabled(crtc); 6210 drm_crtc_vblank_on(crtc); 6211 6212 for_each_encoder_on_crtc(dev, crtc, encoder) 6213 
encoder->enable(encoder); 6214 } 6215 6216 static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6217 { 6218 struct drm_device *dev = crtc->base.dev; 6219 struct drm_i915_private *dev_priv = dev->dev_private; 6220 6221 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6222 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6223 } 6224 6225 static void i9xx_crtc_enable(struct drm_crtc *crtc) 6226 { 6227 struct drm_device *dev = crtc->dev; 6228 struct drm_i915_private *dev_priv = to_i915(dev); 6229 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6230 struct intel_encoder *encoder; 6231 int pipe = intel_crtc->pipe; 6232 6233 if (WARN_ON(intel_crtc->active)) 6234 return; 6235 6236 i9xx_set_pll_dividers(intel_crtc); 6237 6238 if (intel_crtc->config->has_dp_encoder) 6239 intel_dp_set_m_n(intel_crtc, M1_N1); 6240 6241 intel_set_pipe_timings(intel_crtc); 6242 6243 i9xx_set_pipeconf(intel_crtc); 6244 6245 intel_crtc->active = true; 6246 6247 if (!IS_GEN2(dev)) 6248 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 6249 6250 for_each_encoder_on_crtc(dev, crtc, encoder) 6251 if (encoder->pre_enable) 6252 encoder->pre_enable(encoder); 6253 6254 i9xx_enable_pll(intel_crtc); 6255 6256 i9xx_pfit_enable(intel_crtc); 6257 6258 intel_crtc_load_lut(crtc); 6259 6260 intel_update_watermarks(crtc); 6261 intel_enable_pipe(intel_crtc); 6262 6263 assert_vblank_disabled(crtc); 6264 drm_crtc_vblank_on(crtc); 6265 6266 for_each_encoder_on_crtc(dev, crtc, encoder) 6267 encoder->enable(encoder); 6268 } 6269 6270 static void i9xx_pfit_disable(struct intel_crtc *crtc) 6271 { 6272 struct drm_device *dev = crtc->base.dev; 6273 struct drm_i915_private *dev_priv = dev->dev_private; 6274 6275 if (!crtc->config->gmch_pfit.control) 6276 return; 6277 6278 assert_pipe_disabled(dev_priv, crtc->pipe); 6279 6280 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 6281 I915_READ(PFIT_CONTROL)); 6282 I915_WRITE(PFIT_CONTROL, 0); 6283 } 6284 6285 static void 
i9xx_crtc_disable(struct drm_crtc *crtc) 6286 { 6287 struct drm_device *dev = crtc->dev; 6288 struct drm_i915_private *dev_priv = dev->dev_private; 6289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6290 struct intel_encoder *encoder; 6291 int pipe = intel_crtc->pipe; 6292 6293 /* 6294 * On gen2 planes are double buffered but the pipe isn't, so we must 6295 * wait for planes to fully turn off before disabling the pipe. 6296 * We also need to wait on all gmch platforms because of the 6297 * self-refresh mode constraint explained above. 6298 */ 6299 intel_wait_for_vblank(dev, pipe); 6300 6301 for_each_encoder_on_crtc(dev, crtc, encoder) 6302 encoder->disable(encoder); 6303 6304 drm_crtc_vblank_off(crtc); 6305 assert_vblank_disabled(crtc); 6306 6307 intel_disable_pipe(intel_crtc); 6308 6309 i9xx_pfit_disable(intel_crtc); 6310 6311 for_each_encoder_on_crtc(dev, crtc, encoder) 6312 if (encoder->post_disable) 6313 encoder->post_disable(encoder); 6314 6315 if (!intel_crtc->config->has_dsi_encoder) { 6316 if (IS_CHERRYVIEW(dev)) 6317 chv_disable_pll(dev_priv, pipe); 6318 else if (IS_VALLEYVIEW(dev)) 6319 vlv_disable_pll(dev_priv, pipe); 6320 else 6321 i9xx_disable_pll(intel_crtc); 6322 } 6323 6324 for_each_encoder_on_crtc(dev, crtc, encoder) 6325 if (encoder->post_pll_disable) 6326 encoder->post_pll_disable(encoder); 6327 6328 if (!IS_GEN2(dev)) 6329 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6330 } 6331 6332 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) 6333 { 6334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6335 struct drm_i915_private *dev_priv = to_i915(crtc->dev); 6336 enum intel_display_power_domain domain; 6337 unsigned long domains; 6338 6339 if (!intel_crtc->active) 6340 return; 6341 6342 if (to_intel_plane_state(crtc->primary->state)->visible) { 6343 WARN_ON(intel_crtc->unpin_work); 6344 6345 intel_pre_disable_primary(crtc); 6346 6347 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary)); 6348 
		to_intel_plane_state(crtc->primary->state)->visible = false;
	}

	dev_priv->display.crtc_disable(crtc);
	intel_crtc->active = false;
	intel_fbc_disable(intel_crtc);
	intel_update_watermarks(crtc);
	intel_disable_shared_dpll(intel_crtc);

	/* Drop every power domain reference this CRTC was holding. */
	domains = intel_crtc->enabled_power_domains;
	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
	intel_crtc->enabled_power_domains = 0;

	/* Clear the committed bookkeeping used for CDCLK computation. */
	dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
	dev_priv->min_pixclk[intel_crtc->pipe] = 0;
}

/*
 * turn all crtc's off, but do not adjust state
 * This has to be paired with a call to intel_modeset_setup_hw_state.
 *
 * Returns 0 on success or a negative errno from the atomic helper; on
 * success the duplicated state is stashed in modeset_restore_state so
 * resume can replay it.
 */
int intel_display_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	int ret;

	state = drm_atomic_helper_suspend(dev);
	ret = PTR_ERR_OR_ZERO(state);
	if (ret)
		DRM_ERROR("Suspending crtc's failed with %i\n", ret);
	else
		dev_priv->modeset_restore_state = state;
	return ret;
}

/* Generic encoder destructor: tear down the DRM core object and free us. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency.
 */
static void intel_connector_check_state(struct intel_connector *connector)
{
	struct drm_crtc *crtc = connector->base.state->crtc;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
		      connector->base.base.id,
		      connector->base.name);

	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_connector_state *conn_state = connector->base.state;

		/* Hardware says enabled: software state must agree. */
		I915_STATE_WARN(!crtc,
			 "connector enabled without attached crtc\n");

		if (!crtc)
			return;

		I915_STATE_WARN(!crtc->state->active,
		      "connector is active, but attached crtc isn't\n");

		/* MST encoders are cross-checked by their own code. */
		if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
			return;

		I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
			"atomic encoder doesn't match attached encoder\n");

		I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
			"attached encoder crtc differs from connector crtc\n");
	} else {
		/* Hardware says disabled: nothing should claim otherwise. */
		I915_STATE_WARN(crtc && crtc->state->active,
			"attached crtc is active, but connector isn't\n");
		I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
			"best encoder set without crtc!\n");
	}
}

/*
 * Allocate and attach the initial atomic state for @connector.
 * Returns 0 on success, -ENOMEM if the state allocation failed.
 */
int intel_connector_init(struct intel_connector *connector)
{
	drm_atomic_helper_connector_reset(&connector->base);

	if (!connector->base.state)
		return -ENOMEM;

	return 0;
}

/*
 * Allocate a zeroed intel_connector with its atomic state initialized.
 * Returns NULL on allocation failure; the caller owns the result.
 */
struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector.
 */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	/* Pipe result is discarded; only the on/off state matters here. */
	return encoder->get_hw_state(encoder, &pipe);
}

/* FDI lanes a pipe will consume: only PCH-encoder pipes use FDI at all. */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

/*
 * Validate that the FDI lane count requested in @pipe_config can be
 * satisfied given the platform's lane budget and what the other pipes
 * sharing the FDI links need. Returns 0 if OK, -EINVAL if the config is
 * impossible, or a PTR_ERR from acquiring another CRTC's atomic state.
 */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	/* HSW/BDW have a hard 2-lane FDI limit (LPT PCH). */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	/* Two-pipe parts have no lane sharing to worry about. */
	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B steals link C: pipe C must not need FDI. */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
case PIPE_C: 6526 if (pipe_config->fdi_lanes > 2) { 6527 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", 6528 pipe_name(pipe), pipe_config->fdi_lanes); 6529 return -EINVAL; 6530 } 6531 6532 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B)); 6533 other_crtc_state = 6534 intel_atomic_get_crtc_state(state, other_crtc); 6535 if (IS_ERR(other_crtc_state)) 6536 return PTR_ERR(other_crtc_state); 6537 6538 if (pipe_required_fdi_lanes(other_crtc_state) > 2) { 6539 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); 6540 return -EINVAL; 6541 } 6542 return 0; 6543 default: 6544 BUG(); 6545 } 6546 } 6547 6548 #define RETRY 1 6549 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc, 6550 struct intel_crtc_state *pipe_config) 6551 { 6552 struct drm_device *dev = intel_crtc->base.dev; 6553 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6554 int lane, link_bw, fdi_dotclock, ret; 6555 bool needs_recompute = false; 6556 6557 retry: 6558 /* FDI is a binary signal running at ~2.7GHz, encoding 6559 * each output octet as 10 bits. The actual frequency 6560 * is stored as a divider into a 100MHz clock, and the 6561 * mode pixel clock is stored in units of 1KHz. 
6562 * Hence the bw of each lane in terms of the mode signal 6563 * is: 6564 */ 6565 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; 6566 6567 fdi_dotclock = adjusted_mode->crtc_clock; 6568 6569 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw, 6570 pipe_config->pipe_bpp); 6571 6572 pipe_config->fdi_lanes = lane; 6573 6574 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 6575 link_bw, &pipe_config->fdi_m_n); 6576 6577 ret = ironlake_check_fdi_lanes(intel_crtc->base.dev, 6578 intel_crtc->pipe, pipe_config); 6579 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { 6580 pipe_config->pipe_bpp -= 2*3; 6581 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n", 6582 pipe_config->pipe_bpp); 6583 needs_recompute = true; 6584 pipe_config->bw_constrained = true; 6585 6586 goto retry; 6587 } 6588 6589 if (needs_recompute) 6590 return RETRY; 6591 6592 return ret; 6593 } 6594 6595 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv, 6596 struct intel_crtc_state *pipe_config) 6597 { 6598 if (pipe_config->pipe_bpp > 24) 6599 return false; 6600 6601 /* HSW can handle pixel rate up to cdclk? */ 6602 if (IS_HASWELL(dev_priv->dev)) 6603 return true; 6604 6605 /* 6606 * We compare against max which means we must take 6607 * the increased cdclk requirement into account when 6608 * calculating the new cdclk. 
6609 * 6610 * Should measure whether using a lower cdclk w/o IPS 6611 */ 6612 return ilk_pipe_pixel_rate(pipe_config) <= 6613 dev_priv->max_cdclk_freq * 95 / 100; 6614 } 6615 6616 static void hsw_compute_ips_config(struct intel_crtc *crtc, 6617 struct intel_crtc_state *pipe_config) 6618 { 6619 struct drm_device *dev = crtc->base.dev; 6620 struct drm_i915_private *dev_priv = dev->dev_private; 6621 6622 pipe_config->ips_enabled = i915.enable_ips && 6623 hsw_crtc_supports_ips(crtc) && 6624 pipe_config_supports_ips(dev_priv, pipe_config); 6625 } 6626 6627 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) 6628 { 6629 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6630 6631 /* GDG double wide on either pipe, otherwise pipe A only */ 6632 return INTEL_INFO(dev_priv)->gen < 4 && 6633 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6634 } 6635 6636 static int intel_crtc_compute_config(struct intel_crtc *crtc, 6637 struct intel_crtc_state *pipe_config) 6638 { 6639 struct drm_device *dev = crtc->base.dev; 6640 struct drm_i915_private *dev_priv = dev->dev_private; 6641 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6642 6643 /* FIXME should check pixel clock limits on all platforms */ 6644 if (INTEL_INFO(dev)->gen < 4) { 6645 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6646 6647 /* 6648 * Enable double wide mode when the dot clock 6649 * is > 90% of the (display) core speed. 
6650 */ 6651 if (intel_crtc_supports_double_wide(crtc) && 6652 adjusted_mode->crtc_clock > clock_limit) { 6653 clock_limit *= 2; 6654 pipe_config->double_wide = true; 6655 } 6656 6657 if (adjusted_mode->crtc_clock > clock_limit) { 6658 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6659 adjusted_mode->crtc_clock, clock_limit, 6660 yesno(pipe_config->double_wide)); 6661 return -EINVAL; 6662 } 6663 } 6664 6665 /* 6666 * Pipe horizontal size must be even in: 6667 * - DVO ganged mode 6668 * - LVDS dual channel mode 6669 * - Double wide pipe 6670 */ 6671 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && 6672 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6673 pipe_config->pipe_src_w &= ~1; 6674 6675 /* Cantiga+ cannot handle modes with a hsync front porch of 0. 6676 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 6677 */ 6678 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6679 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6680 return -EINVAL; 6681 6682 if (HAS_IPS(dev)) 6683 hsw_compute_ips_config(crtc, pipe_config); 6684 6685 if (pipe_config->has_pch_encoder) 6686 return ironlake_fdi_compute_config(crtc, pipe_config); 6687 6688 return 0; 6689 } 6690 6691 static int skylake_get_display_clock_speed(struct drm_device *dev) 6692 { 6693 struct drm_i915_private *dev_priv = to_i915(dev); 6694 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6695 uint32_t cdctl = I915_READ(CDCLK_CTL); 6696 uint32_t linkrate; 6697 6698 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6699 return 24000; /* 24MHz is the cd freq with NSSC ref */ 6700 6701 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6702 return 540000; 6703 6704 linkrate = (I915_READ(DPLL_CTRL1) & 6705 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1; 6706 6707 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6708 linkrate == DPLL_CTRL1_LINK_RATE_1080) { 6709 /* vco 8640 */ 6710 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6711 case CDCLK_FREQ_450_432: 
6712 return 432000; 6713 case CDCLK_FREQ_337_308: 6714 return 308570; 6715 case CDCLK_FREQ_675_617: 6716 return 617140; 6717 default: 6718 WARN(1, "Unknown cd freq selection\n"); 6719 } 6720 } else { 6721 /* vco 8100 */ 6722 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6723 case CDCLK_FREQ_450_432: 6724 return 450000; 6725 case CDCLK_FREQ_337_308: 6726 return 337500; 6727 case CDCLK_FREQ_675_617: 6728 return 675000; 6729 default: 6730 WARN(1, "Unknown cd freq selection\n"); 6731 } 6732 } 6733 6734 /* error case, do as if DPLL0 isn't enabled */ 6735 return 24000; 6736 } 6737 6738 static int broxton_get_display_clock_speed(struct drm_device *dev) 6739 { 6740 struct drm_i915_private *dev_priv = to_i915(dev); 6741 uint32_t cdctl = I915_READ(CDCLK_CTL); 6742 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6743 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); 6744 int cdclk; 6745 6746 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6747 return 19200; 6748 6749 cdclk = 19200 * pll_ratio / 2; 6750 6751 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6752 case BXT_CDCLK_CD2X_DIV_SEL_1: 6753 return cdclk; /* 576MHz or 624MHz */ 6754 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6755 return cdclk * 2 / 3; /* 384MHz */ 6756 case BXT_CDCLK_CD2X_DIV_SEL_2: 6757 return cdclk / 2; /* 288MHz */ 6758 case BXT_CDCLK_CD2X_DIV_SEL_4: 6759 return cdclk / 4; /* 144MHz */ 6760 } 6761 6762 /* error case, do as if DE PLL isn't enabled */ 6763 return 19200; 6764 } 6765 6766 static int broadwell_get_display_clock_speed(struct drm_device *dev) 6767 { 6768 struct drm_i915_private *dev_priv = dev->dev_private; 6769 uint32_t lcpll = I915_READ(LCPLL_CTL); 6770 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6771 6772 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6773 return 800000; 6774 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6775 return 450000; 6776 else if (freq == LCPLL_CLK_FREQ_450) 6777 return 450000; 6778 else if (freq == LCPLL_CLK_FREQ_54O_BDW) 6779 return 540000; 6780 else if (freq == 
LCPLL_CLK_FREQ_337_5_BDW) 6781 return 337500; 6782 else 6783 return 675000; 6784 } 6785 6786 static int haswell_get_display_clock_speed(struct drm_device *dev) 6787 { 6788 struct drm_i915_private *dev_priv = dev->dev_private; 6789 uint32_t lcpll = I915_READ(LCPLL_CTL); 6790 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6791 6792 if (lcpll & LCPLL_CD_SOURCE_FCLK) 6793 return 800000; 6794 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT) 6795 return 450000; 6796 else if (freq == LCPLL_CLK_FREQ_450) 6797 return 450000; 6798 else if (IS_HSW_ULT(dev)) 6799 return 337500; 6800 else 6801 return 540000; 6802 } 6803 6804 static int valleyview_get_display_clock_speed(struct drm_device *dev) 6805 { 6806 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk", 6807 CCK_DISPLAY_CLOCK_CONTROL); 6808 } 6809 6810 static int ilk_get_display_clock_speed(struct drm_device *dev) 6811 { 6812 return 450000; 6813 } 6814 6815 static int i945_get_display_clock_speed(struct drm_device *dev) 6816 { 6817 return 400000; 6818 } 6819 6820 static int i915_get_display_clock_speed(struct drm_device *dev) 6821 { 6822 return 333333; 6823 } 6824 6825 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) 6826 { 6827 return 200000; 6828 } 6829 6830 static int pnv_get_display_clock_speed(struct drm_device *dev) 6831 { 6832 u16 gcfgc = 0; 6833 6834 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6835 6836 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6837 case GC_DISPLAY_CLOCK_267_MHZ_PNV: 6838 return 266667; 6839 case GC_DISPLAY_CLOCK_333_MHZ_PNV: 6840 return 333333; 6841 case GC_DISPLAY_CLOCK_444_MHZ_PNV: 6842 return 444444; 6843 case GC_DISPLAY_CLOCK_200_MHZ_PNV: 6844 return 200000; 6845 default: 6846 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc); 6847 case GC_DISPLAY_CLOCK_133_MHZ_PNV: 6848 return 133333; 6849 case GC_DISPLAY_CLOCK_167_MHZ_PNV: 6850 return 166667; 6851 } 6852 } 6853 6854 static int i915gm_get_display_clock_speed(struct drm_device *dev) 6855 { 6856 u16 gcfgc = 0; 
6857 6858 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 6859 6860 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 6861 return 133333; 6862 else { 6863 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 6864 case GC_DISPLAY_CLOCK_333_MHZ: 6865 return 333333; 6866 default: 6867 case GC_DISPLAY_CLOCK_190_200_MHZ: 6868 return 190000; 6869 } 6870 } 6871 } 6872 6873 static int i865_get_display_clock_speed(struct drm_device *dev) 6874 { 6875 return 266667; 6876 } 6877 6878 static int i85x_get_display_clock_speed(struct drm_device *dev) 6879 { 6880 u16 hpllcc = 0; 6881 6882 /* 6883 * 852GM/852GMV only supports 133 MHz and the HPLLCC 6884 * encoding is different :( 6885 * FIXME is this the right way to detect 852GM/852GMV? 6886 */ 6887 if (dev->pdev->revision == 0x1) 6888 return 133333; 6889 6890 #if 0 6891 pci_bus_read_config_word(dev->pdev->bus, 6892 PCI_DEVFN(0, 3), HPLLCC, &hpllcc); 6893 #endif 6894 6895 /* Assume that the hardware is in the high speed state. This 6896 * should be the default. 6897 */ 6898 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 6899 case GC_CLOCK_133_200: 6900 case GC_CLOCK_133_200_2: 6901 case GC_CLOCK_100_200: 6902 return 200000; 6903 case GC_CLOCK_166_250: 6904 return 250000; 6905 case GC_CLOCK_100_133: 6906 return 133333; 6907 case GC_CLOCK_133_266: 6908 case GC_CLOCK_133_266_2: 6909 case GC_CLOCK_166_266: 6910 return 266667; 6911 } 6912 6913 /* Shouldn't happen */ 6914 return 0; 6915 } 6916 6917 static int i830_get_display_clock_speed(struct drm_device *dev) 6918 { 6919 return 133333; 6920 } 6921 6922 static unsigned int intel_hpll_vco(struct drm_device *dev) 6923 { 6924 struct drm_i915_private *dev_priv = dev->dev_private; 6925 static const unsigned int blb_vco[8] = { 6926 [0] = 3200000, 6927 [1] = 4000000, 6928 [2] = 5333333, 6929 [3] = 4800000, 6930 [4] = 6400000, 6931 }; 6932 static const unsigned int pnv_vco[8] = { 6933 [0] = 3200000, 6934 [1] = 4000000, 6935 [2] = 5333333, 6936 [3] = 4800000, 6937 [4] = 2666667, 6938 }; 6939 static const unsigned int 
cl_vco[8] = { 6940 [0] = 3200000, 6941 [1] = 4000000, 6942 [2] = 5333333, 6943 [3] = 6400000, 6944 [4] = 3333333, 6945 [5] = 3566667, 6946 [6] = 4266667, 6947 }; 6948 static const unsigned int elk_vco[8] = { 6949 [0] = 3200000, 6950 [1] = 4000000, 6951 [2] = 5333333, 6952 [3] = 4800000, 6953 }; 6954 static const unsigned int ctg_vco[8] = { 6955 [0] = 3200000, 6956 [1] = 4000000, 6957 [2] = 5333333, 6958 [3] = 6400000, 6959 [4] = 2666667, 6960 [5] = 4266667, 6961 }; 6962 const unsigned int *vco_table; 6963 unsigned int vco; 6964 uint8_t tmp = 0; 6965 6966 /* FIXME other chipsets? */ 6967 if (IS_GM45(dev)) 6968 vco_table = ctg_vco; 6969 else if (IS_G4X(dev)) 6970 vco_table = elk_vco; 6971 else if (IS_CRESTLINE(dev)) 6972 vco_table = cl_vco; 6973 else if (IS_PINEVIEW(dev)) 6974 vco_table = pnv_vco; 6975 else if (IS_G33(dev)) 6976 vco_table = blb_vco; 6977 else 6978 return 0; 6979 6980 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO); 6981 6982 vco = vco_table[tmp & 0x7]; 6983 if (vco == 0) 6984 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp); 6985 else 6986 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco); 6987 6988 return vco; 6989 } 6990 6991 static int gm45_get_display_clock_speed(struct drm_device *dev) 6992 { 6993 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 6994 uint16_t tmp = 0; 6995 6996 pci_read_config_word(dev->pdev, GCFGC, &tmp); 6997 6998 cdclk_sel = (tmp >> 12) & 0x1; 6999 7000 switch (vco) { 7001 case 2666667: 7002 case 4000000: 7003 case 5333333: 7004 return cdclk_sel ? 333333 : 222222; 7005 case 3200000: 7006 return cdclk_sel ? 320000 : 228571; 7007 default: 7008 DRM_ERROR("Unable to determine CDCLK. 
HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp); 7009 return 222222; 7010 } 7011 } 7012 7013 static int i965gm_get_display_clock_speed(struct drm_device *dev) 7014 { 7015 static const uint8_t div_3200[] = { 16, 10, 8 }; 7016 static const uint8_t div_4000[] = { 20, 12, 10 }; 7017 static const uint8_t div_5333[] = { 24, 16, 14 }; 7018 const uint8_t *div_table; 7019 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7020 uint16_t tmp = 0; 7021 7022 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7023 7024 cdclk_sel = ((tmp >> 8) & 0x1f) - 1; 7025 7026 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7027 goto fail; 7028 7029 switch (vco) { 7030 case 3200000: 7031 div_table = div_3200; 7032 break; 7033 case 4000000: 7034 div_table = div_4000; 7035 break; 7036 case 5333333: 7037 div_table = div_5333; 7038 break; 7039 default: 7040 goto fail; 7041 } 7042 7043 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 7044 7045 fail: 7046 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp); 7047 return 200000; 7048 } 7049 7050 static int g33_get_display_clock_speed(struct drm_device *dev) 7051 { 7052 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 }; 7053 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 }; 7054 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 }; 7055 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 }; 7056 const uint8_t *div_table; 7057 unsigned int cdclk_sel, vco = intel_hpll_vco(dev); 7058 uint16_t tmp = 0; 7059 7060 pci_read_config_word(dev->pdev, GCFGC, &tmp); 7061 7062 cdclk_sel = (tmp >> 4) & 0x7; 7063 7064 if (cdclk_sel >= ARRAY_SIZE(div_3200)) 7065 goto fail; 7066 7067 switch (vco) { 7068 case 3200000: 7069 div_table = div_3200; 7070 break; 7071 case 4000000: 7072 div_table = div_4000; 7073 break; 7074 case 4800000: 7075 div_table = div_4800; 7076 break; 7077 case 5333333: 7078 div_table = div_5333; 7079 break; 7080 default: 7081 goto fail; 7082 } 7083 7084 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]); 
7085 7086 fail: 7087 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp); 7088 return 190476; 7089 } 7090 7091 static void 7092 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 7093 { 7094 while (*num > DATA_LINK_M_N_MASK || 7095 *den > DATA_LINK_M_N_MASK) { 7096 *num >>= 1; 7097 *den >>= 1; 7098 } 7099 } 7100 7101 static void compute_m_n(unsigned int m, unsigned int n, 7102 uint32_t *ret_m, uint32_t *ret_n) 7103 { 7104 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 7105 *ret_m = div_u64((uint64_t) m * *ret_n, n); 7106 intel_reduce_m_n_ratio(ret_m, ret_n); 7107 } 7108 7109 void 7110 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 7111 int pixel_clock, int link_clock, 7112 struct intel_link_m_n *m_n) 7113 { 7114 m_n->tu = 64; 7115 7116 compute_m_n(bits_per_pixel * pixel_clock, 7117 link_clock * nlanes * 8, 7118 &m_n->gmch_m, &m_n->gmch_n); 7119 7120 compute_m_n(pixel_clock, link_clock, 7121 &m_n->link_m, &m_n->link_n); 7122 } 7123 7124 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 7125 { 7126 if (i915.panel_use_ssc >= 0) 7127 return i915.panel_use_ssc != 0; 7128 return dev_priv->vbt.lvds_use_ssc 7129 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 7130 } 7131 7132 static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, 7133 int num_connectors) 7134 { 7135 struct drm_device *dev = crtc_state->base.crtc->dev; 7136 struct drm_i915_private *dev_priv = dev->dev_private; 7137 int refclk; 7138 7139 WARN_ON(!crtc_state->base.state); 7140 7141 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || IS_BROXTON(dev)) { 7142 refclk = 100000; 7143 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7144 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 7145 refclk = dev_priv->vbt.lvds_ssc_freq; 7146 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7147 } else if (!IS_GEN2(dev)) { 7148 refclk = 96000; 7149 } else { 7150 refclk = 48000; 7151 } 
7152 7153 return refclk; 7154 } 7155 7156 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll) 7157 { 7158 return (1 << dpll->n) << 16 | dpll->m2; 7159 } 7160 7161 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll) 7162 { 7163 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2; 7164 } 7165 7166 static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7167 struct intel_crtc_state *crtc_state, 7168 intel_clock_t *reduced_clock) 7169 { 7170 struct drm_device *dev = crtc->base.dev; 7171 u32 fp, fp2 = 0; 7172 7173 if (IS_PINEVIEW(dev)) { 7174 fp = pnv_dpll_compute_fp(&crtc_state->dpll); 7175 if (reduced_clock) 7176 fp2 = pnv_dpll_compute_fp(reduced_clock); 7177 } else { 7178 fp = i9xx_dpll_compute_fp(&crtc_state->dpll); 7179 if (reduced_clock) 7180 fp2 = i9xx_dpll_compute_fp(reduced_clock); 7181 } 7182 7183 crtc_state->dpll_hw_state.fp0 = fp; 7184 7185 crtc->lowfreq_avail = false; 7186 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7187 reduced_clock) { 7188 crtc_state->dpll_hw_state.fp1 = fp2; 7189 crtc->lowfreq_avail = true; 7190 } else { 7191 crtc_state->dpll_hw_state.fp1 = fp; 7192 } 7193 } 7194 7195 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe 7196 pipe) 7197 { 7198 u32 reg_val; 7199 7200 /* 7201 * PLLB opamp always calibrates to max value of 0x3f, force enable it 7202 * and set it to a reasonable value instead. 
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): plain assignment discards the masked read-back above;
	 * this matches the historical magic DPIO sequence, but confirm against
	 * the programming notes before "fixing" it. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the PCH transcoder M/N link dividers for a pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/*
 * Program the CPU transcoder M/N values.  gen5+ uses per-transcoder
 * registers, older parts the per-pipe G4X registers.  m2_n2 is the
 * optional second divider set used by DRRS.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
		    crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
				   TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/* Select which DP M/N divider set (M1_N1 or M2_N2) gets programmed. */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}

/* Compute the DPLL/DPLL_MD register state for a VLV pipe. */
static void vlv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REF_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_REF_CLK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}

/*
 * Program the VLV DPIO PHY dividers and analog settings for the PLL
 * described by pipe_config, under the sideband lock.  Must run before
 * the PLL is enabled.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->sb_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Write the dividers once more with calibration enabled. */
	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
			       0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
				       0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->sb_lock);
}

/* Compute the DPLL/DPLL_MD register state for a CHV pipe. */
static void chv_compute_dpll(struct intel_crtc *crtc,
			     struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
		DPLL_VCO_ENABLE;
	if (crtc->pipe != PIPE_A)
		pipe_config->dpll_hw_state.dpll |=
			DPLL_INTEGRATED_CRI_CLK_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

/*
 * Program the CHV DPIO PHY for the PLL described by pipe_config:
 * P/M/N dividers, fractional M2, digital lock detect threshold,
 * VCO-dependent loop filter coefficients and AFC recalibration.
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	i915_reg_t dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;	/* low 22 bits: fraction */
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;		/* integer part */
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
int vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	struct intel_crtc_state *pipe_config;

	/* Temporary state; only crtc, pixel_multiplier and dpll are consumed. */
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on().
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/* Compute DPLL/DPLL_MD register state for gen3+ (non-VLV/CHV) pipes. */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 <<
PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, SSC, or the default refclk. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

/* Compute DPLL register state for gen2 pipes. */
static void i8xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      intel_clock_t *reduced_clock,
			      int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

/* Program the pipe/transcoder timing registers from the adjusted mode. */
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct
drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to change the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Timings are packed as (active/start - 1) | ((total/end - 1) << 16). */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

/* Read the pipe/transcoder timing registers back into pipe_config. */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	/* Undo the (value - 1) | ((value - 1) << 16) packing on readout. */
	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* Compensate for the halflines the hardware adds itself. */
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

/* Fill a drm_display_mode from the adjusted mode in pipe_config. */
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;
	mode->type = DRM_MODE_TYPE_DRIVER;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	mode->flags |= pipe_config->base.adjusted_mode.flags;

	mode->hsync = drm_mode_hsync(mode);
	mode->vrefresh = drm_mode_vrefresh(mode);
	drm_mode_set_name(mode);
}

/* Compute and write PIPECONF for gen2-4/VLV/CHV style pipes. */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Preserve the force-enabled bit for quirky pipes. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	    intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/* Find PLL dividers and fill dpll_hw_state for gmch platforms. */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct
drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock;
	bool ok;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* DSI has its own clock handling; nothing to do here. */
	if (crtc_state->has_dsi_encoder)
		return 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc == &crtc->base)
			num_connectors++;
	}

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE. The returned values represent
		 * the clock equation: refclk * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		/* Compat-code for transition, will disappear.
 */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Dispatch to the platform-specific DPLL state computation. */
	if (IS_GEN2(dev)) {
		i8xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_compute_dpll(crtc, crtc_state);
	} else {
		i9xx_compute_dpll(crtc, crtc_state, NULL,
				  num_connectors);
	}

	return 0;
}

/* Read back the gmch panel fitter state, if it is attached to our pipe. */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe.
 */
	if (INTEL_INFO(dev)->gen < 4) {
		/* gen < 4: only pipe B is read out here (no pipe select bits) */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
}

/* Read back the VLV DPIO dividers and compute the port clock. */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
}

/*
 * Read back the primary plane state that firmware programmed for this
 * pipe and record it in plane_config, so the boot framebuffer can be
 * inherited.  Bails out if the plane is not enabled or the fb wrapper
 * cannot be allocated.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/* Read back the CHV DPIO dividers and compute the port clock. */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;

	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe,
CHV_PLL_DW1(port)); 8100 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port)); 8101 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 8102 mutex_unlock(&dev_priv->sb_lock); 8103 8104 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0; 8105 clock.m2 = (pll_dw0 & 0xff) << 22; 8106 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN) 8107 clock.m2 |= pll_dw2 & 0x3fffff; 8108 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf; 8109 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7; 8110 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f; 8111 8112 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); 8113 } 8114 8115 static bool i9xx_get_pipe_config(struct intel_crtc *crtc, 8116 struct intel_crtc_state *pipe_config) 8117 { 8118 struct drm_device *dev = crtc->base.dev; 8119 struct drm_i915_private *dev_priv = dev->dev_private; 8120 enum intel_display_power_domain power_domain; 8121 uint32_t tmp; 8122 bool ret; 8123 8124 power_domain = POWER_DOMAIN_PIPE(crtc->pipe); 8125 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 8126 return false; 8127 8128 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 8129 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 8130 8131 ret = false; 8132 8133 tmp = I915_READ(PIPECONF(crtc->pipe)); 8134 if (!(tmp & PIPECONF_ENABLE)) 8135 goto out; 8136 8137 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 8138 switch (tmp & PIPECONF_BPC_MASK) { 8139 case PIPECONF_6BPC: 8140 pipe_config->pipe_bpp = 18; 8141 break; 8142 case PIPECONF_8BPC: 8143 pipe_config->pipe_bpp = 24; 8144 break; 8145 case PIPECONF_10BPC: 8146 pipe_config->pipe_bpp = 30; 8147 break; 8148 default: 8149 break; 8150 } 8151 } 8152 8153 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) && 8154 (tmp & PIPECONF_COLOR_RANGE_SELECT)) 8155 pipe_config->limited_color_range = true; 8156 8157 if (INTEL_INFO(dev)->gen < 4) 8158 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE; 8159 8160 intel_get_pipe_timings(crtc, pipe_config); 8161 
	i9xx_get_pfit_config(crtc, pipe_config);

	/* Pixel multiplier readout: DPLL_MD on gen4+, DPLL on 945/G33. */
	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * Normally the dotclock is filled in by the encoder .get_config()
	 * but in case the pipe is enabled w/o any ports we need a sane
	 * default.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		pipe_config->port_clock / pipe_config->pixel_multiplier;

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}

/*
 * Configure the PCH display reference clock (PCH_DREF_CONTROL) on
 * IBX/CPT parts.  Scans the registered encoders to decide whether a
 * panel (LVDS/eDP) is present and whether spread-spectrum clocking
 * should be enabled, computes the desired register value up front, and
 * bails out early if no change is needed.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	/* On IBX an external CK505 clock chip (per VBT) implies SSC. */
	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	/*
	 * Now walk the register towards the computed state one source at
	 * a time, posting each write and waiting 200us for it to settle.
	 */
	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* We must have reached exactly the precomputed target state. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY IOSFSB reset via SOUTH_CHICKEN2, waiting (up to
 * 100us each) for the reset status to assert and then de-assert.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	/* Opaque read-modify-write sequence over SBI mPHY registers,
	 * taken verbatim from the workaround; magic offsets/values are
	 * intentional and must not be "cleaned up". */
	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21)
	       | (1 << 18);
	intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21) | (1 << 18);
	intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
	tmp &= ~(7 << 13);
	tmp |= (5 << 13);
	intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
	tmp &= ~0xFF;
	tmp |= 0x1C;
	intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
	tmp &= ~(0xFF << 16);
	tmp |= (0x1C << 16);
	intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
	tmp |= (1 << 27);
	intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
	tmp &= ~(0xF << 28);
	tmp |= (4 << 28);
	intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}

/* Implements 3 different sequences from BSpec chapter "Display iCLK
 * Programming" based on the parameters passed:
 *  - Sequence to enable CLKOUT_DP
 *  - Sequence to enable CLKOUT_DP without spread
 *  - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* Sanitize mutually-dependent parameters rather than failing. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	/* Un-disable the SSC block but keep the path in bypass (PATHALT). */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		/* Drop the bypass so the spread path is actually used. */
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* Buffer-enable register differs between LP and big-core PCH. */
	reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = HAS_PCH_LPT_LP(dev) ?
	      SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	/* Put the path in bypass before fully disabling the SSC block. */
	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/* Map a bend in steps of 5 (-50..50) onto an index into the table below. */
#define BEND_IDX(steps) ((50 + (steps)) / 5)

static const uint16_t sscdivintphase[] = {
	[BEND_IDX( 50)] = 0x3B23,
	[BEND_IDX( 45)] = 0x3B23,
	[BEND_IDX( 40)] = 0x3C23,
	[BEND_IDX( 35)] = 0x3C23,
	[BEND_IDX( 30)] = 0x3D23,
	[BEND_IDX( 25)] = 0x3D23,
	[BEND_IDX( 20)] = 0x3E23,
	[BEND_IDX( 15)] = 0x3E23,
	[BEND_IDX( 10)] = 0x3F23,
	[BEND_IDX(  5)] = 0x3F23,
	[BEND_IDX(  0)] = 0x0025,
	[BEND_IDX( -5)] = 0x0025,
	[BEND_IDX(-10)] = 0x0125,
	[BEND_IDX(-15)] = 0x0125,
	[BEND_IDX(-20)] = 0x0225,
	[BEND_IDX(-25)] = 0x0225,
	[BEND_IDX(-30)] = 0x0325,
	[BEND_IDX(-35)] = 0x0325,
	[BEND_IDX(-40)] = 0x0425,
	[BEND_IDX(-45)] = 0x0425,
	[BEND_IDX(-50)] = 0x0525,
};

/*
 * Bend CLKOUT_DP
 * steps -50 to 50 inclusive, in steps of 5
 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
 * change in clock period = -(steps / 10) * 5.787 ps
 */
static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
{
	uint32_t tmp;
	int idx = BEND_IDX(steps);

	if (WARN_ON(steps % 5 != 0))
		return;

	if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
		return;

	mutex_lock(&dev_priv->sb_lock);

	/* Odd multiples of 5 need the dither phase pattern enabled. */
	if (steps % 10 != 0)
		tmp = 0xAAAAAAAB;
	else
		tmp = 0x00000000;
	intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
	tmp &= 0xffff0000;
	tmp |= sscdivintphase[idx];
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

#undef BEND_IDX

/*
 * LPT: with a VGA encoder present, enable CLKOUT_DP (with spread and
 * FDI programming) at zero bend; otherwise disable CLKOUT_DP entirely.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga) {
		lpt_bend_clkout_dp(to_i915(dev), 0);
		lpt_enable_clkout_dp(dev, true, true);
	} else {
		lpt_disable_clkout_dp(dev);
	}
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Return the DPLL reference clock frequency (kHz) for ILK: the VBT's
 * LVDS SSC frequency when a sole LVDS output uses SSC, else 120 MHz.
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/*
 * Program PIPECONF for an ILK-style pipe from the staged crtc config:
 * bpc, dithering, interlace mode and limited color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	/* Scale full range (0-255) down to limited range (16-235). */
	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	/* Gen7+ has explicit post-offset registers; older parts use the
	 * black-screen-offset mode bit instead. */
	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/*
 * Program PIPECONF/PIPEMISC and the gamma mode for a HSW+ transcoder
 * from the staged crtc config.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if
	   (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	/* BDW+ moved bpc/dither control into PIPEMISC. */
	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

/*
 * Find DPLL divisors for @crtc_state's target port clock using the
 * platform find_dpll() hook.  Returns false when no valid divisors
 * exist for the chosen reference clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;
	const intel_limit_t *limit;
	bool ret;

	refclk = ironlake_get_refclk(crtc_state);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE.  The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc_state, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc_state,
					  crtc_state->port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	return true;
}

int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True when the effective M divider is small enough to need CB tuning. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the DPLL control register value for an ILK-style PCH PLL from
 * the divider values already computed into @crtc_state->dpll, also
 * OR-ing FP_CB_TUNE into *fp/*fp2 when autotuning is required.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
	     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Spread-spectrum reference only for a sole SSC LVDS output. */
	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Compute the DPLL state for an ILK-style crtc: find divisors, fill in
 * dpll_hw_state (dpll/fp0/fp1) and reserve a shared PCH PLL when a PCH
 * encoder is present.  Returns 0 on success or -EINVAL.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read the PCH transcoder link M1/N1 and data M1/N1 values (the TU size
 * is packed into the top bits of the data M register).
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		&
		  ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read the CPU transcoder link/data M/N values into @m_n (and, when
 * requested and available, the DRRS M2/N2 set into @m2_n2).  Pre-gen5
 * parts use the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/* Read out the DP link M/N state from whichever transcoder drives it. */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read out the FDI link M/N state (no M2/N2 set for FDI). */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * SKL: find the enabled pipe scaler (not bound to a plane) attached to
 * this crtc and record its position/size, updating scaler_state
 * bookkeeping either way.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

/*
 * SKL: read back the state of primary plane 0 (as programmed by the
 * BIOS/GOP) into @plane_config so the boot framebuffer can be inherited.
 * Bails out silently if the plane is disabled or uses an unknown tiling.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on the tiling format. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* NOTE(review): frees via &intel_fb->base — relies on base being
	 * the first member of struct intel_framebuffer; confirm. */
	kfree(fb);
}

/*
 * ILK: read out the panel fitter state for this pipe, if enabled.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
/*
 * Read out the BIOS-programmed framebuffer on the primary plane of @crtc
 * (ILK through BDW), filling @plane_config so the boot fb can be reused.
 * Returns silently if the plane is disabled or allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling info lives in DSPCNTR on gen4+ only. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base is 4k-aligned; offset register varies by platform. */
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* Plane size is taken from the pipe source size. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
/*
 * Read the full hardware state of @crtc into @pipe_config (ILK/SNB/IVB):
 * pipe enable, bpc, color range, FDI/PCH-encoder state, shared DPLL
 * selection and pixel multiplier, pipe timings and panel fitter.
 *
 * Returns true if the pipe is active. Takes a power-domain reference for
 * the duration of the readout and releases it on all exit paths.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX has a fixed pipe -> PLL mapping. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
/*
 * Sanity-check that every display consumer of LCPLL is already shut down
 * (CRTCs, power well, SPLL/WRPLLs, panel power, backlight PWMs, utility
 * pin, GTC, IRQs) before the PLL itself is disabled. Emits state WARNs
 * only; does not change any hardware state.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* Only Haswell has the second CPU backlight PWM. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
/*
 * Read the D_COMP register: HSW exposes it directly, BDW moved it.
 * NOTE(review): the HSW *write* path below goes through pcode instead of
 * the MMIO register — presumably a workaround; confirm against BSpec.
 */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/* Write D_COMP: via the pcode mailbox on HSW, via MMIO on BDW. */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock to FCLK before touching the PLL. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the compensation block and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if the PLL is already locked and fully enabled. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Force the compensation block back on. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock back from FCLK to LCPLL if it was switched. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv->dev);
}
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LPT-LP: drop the PCH clock-gating restriction first. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Reverse of hsw_enable_pc8(): restore LCPLL, refclks and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}

/* Commit the cdclk frequency computed during the atomic check (BXT). */
static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned int req_cdclk = old_intel_state->dev_cdclk;

	broxton_set_cdclk(dev, req_cdclk);
}
/* compute the max rate for new configuration */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum i915_pipe pipe;

	/* Start from the committed per-pipe rates, then overlay the new state. */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		intel_state->min_pixclk[i] = pixel_rate;
	}

	/* The answer is the maximum over all pipes, touched or not. */
	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}
/*
 * Change the CD clock frequency on BDW: notify pcode, park the CD clock
 * on FCLK, reprogram LCPLL_CTL, switch back, and confirm the result.
 * @cdclk must be one of 337500/450000/540000/675000 kHz.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask pcode for permission before touching the display clock. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Temporarily source the CD clock from FCLK during the change. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
			       LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* "data" is the matching pcode frequency index. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
/*
 * Pick the lowest legal BDW cdclk that covers the max pixel rate of the
 * new atomic state; returns -EINVAL when it would exceed the platform max.
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	if (max_pixclk > 540000)
		cdclk = 675000;
	else if (max_pixclk > 450000)
		cdclk = 540000;
	else if (max_pixclk > 337500)
		cdclk = 450000;
	else
		cdclk = 337500;

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	/* With no active pipes, drop to the minimum frequency. */
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = 337500;

	return 0;
}

/* Commit the cdclk frequency computed during the atomic check (BDW). */
static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	broadwell_set_cdclk(dev, req_cdclk);
}

/*
 * Clock computation for HSW+ CRTCs: DDI encoders select a PLL; DSI has
 * its own dedicated PLL and needs nothing here.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *intel_encoder =
		intel_ddi_get_crtc_new_encoder(crtc_state);

	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
		if (!intel_ddi_pll_select(crtc, crtc_state))
			return -EINVAL;
	}

	crtc->lowfreq_avail = false;

	return 0;
}

/*
 * BXT has a fixed port -> PLL mapping; record both the clock select and
 * the shared-DPLL id for the given port.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}
/* Read out which DPLL feeds @port on SKL/KBL from DPLL_CTRL2. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

/* Read out which PLL drives @port on HSW/BDW from PORT_CLK_SEL. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		pipe_config->shared_dpll = DPLL_ID_SPLL;
		break;
	}
}
/*
 * Read out the DDI port wired to @crtc's transcoder, the PLL feeding it
 * (platform-specific helper), and any FDI/PCH encoder state behind DDI E.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check the selected shared DPLL's hardware state. */
	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
TRANS_DDI_EDP_INPUT_C_ONOFF: 9966 trans_edp_pipe = PIPE_C; 9967 break; 9968 } 9969 9970 if (trans_edp_pipe == crtc->pipe) 9971 pipe_config->cpu_transcoder = TRANSCODER_EDP; 9972 } 9973 9974 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); 9975 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9976 goto out; 9977 power_domain_mask |= BIT(power_domain); 9978 9979 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 9980 if (!(tmp & PIPECONF_ENABLE)) 9981 goto out; 9982 9983 haswell_get_ddi_port_state(crtc, pipe_config); 9984 9985 intel_get_pipe_timings(crtc, pipe_config); 9986 9987 if (INTEL_INFO(dev)->gen >= 9) { 9988 skl_init_scalers(dev, crtc, pipe_config); 9989 } 9990 9991 if (INTEL_INFO(dev)->gen >= 9) { 9992 pipe_config->scaler_state.scaler_id = -1; 9993 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9994 } 9995 9996 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9997 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { 9998 power_domain_mask |= BIT(power_domain); 9999 if (INTEL_INFO(dev)->gen >= 9) 10000 skylake_get_pfit_config(crtc, pipe_config); 10001 else 10002 ironlake_get_pfit_config(crtc, pipe_config); 10003 } 10004 10005 if (IS_HASWELL(dev)) 10006 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 10007 (I915_READ(IPS_CTL) & IPS_ENABLE); 10008 10009 if (pipe_config->cpu_transcoder != TRANSCODER_EDP) { 10010 pipe_config->pixel_multiplier = 10011 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 10012 } else { 10013 pipe_config->pixel_multiplier = 1; 10014 } 10015 10016 ret = true; 10017 10018 out: 10019 for_each_power_domain(power_domain, power_domain_mask) 10020 intel_display_power_put(dev_priv, power_domain); 10021 10022 return ret; 10023 } 10024 10025 static void i845_update_cursor(struct drm_crtc *crtc, u32 base, 10026 const struct intel_plane_state *plane_state) 10027 { 10028 struct drm_device *dev = crtc->dev; 10029 struct drm_i915_private *dev_priv = 
/*
 * Program the 845G/865G cursor. These chips use a free-form size register
 * and only allow base/size/stride changes while the cursor is disabled,
 * so the cursor is turned off first whenever any of those change.
 * A NULL/invisible @plane_state disables the cursor.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* Hardware stride must be a power of two, 256..2048 bytes. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	/* Disable the cursor first if base/size/cntl need to change. */
	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
/*
 * Program the gen3+ cursor for @crtc: square ARGB cursors of 64/128/256.
 * A NULL/invisible @plane_state disables the cursor. The base address
 * write latches all changes on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
			case 64:
				cntl |= CURSOR_MODE_64_ARGB_AX;
				break;
			case 128:
				cntl |= CURSOR_MODE_128_ARGB_AX;
				break;
			case 256:
				cntl |= CURSOR_MODE_256_ARGB_AX;
				break;
			default:
				MISSING_CASE(plane_state->base.crtc_w);
				return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		/* 180 degree rotation is done in hardware on these chips. */
		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* CURPOS encodes negative coordinates as sign + magnitude. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev) &&
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
			/* Point base at the last pixel for 180° rotation. */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	/* 845G/865G have their own cursor programming path. */
	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
/*
 * Validate a requested cursor size against the platform's constraints.
 * Returns true if @width x @height is programmable on @dev.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height == width == height only for square sizes. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/*
 * Legacy gamma LUT update: store the 8 most significant bits of each
 * 16-bit channel value into the CRTC's LUT and push it to hardware.
 */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 * Caller must hold struct_mutex. Returns ERR_PTR on failure.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
/* Locked wrapper around __intel_framebuffer_create(). */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

/* Byte pitch for a @width x @bpp scanline, aligned up to 64 bytes. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed for a framebuffer covering @mode. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/*
 * Allocate a GEM object sized for @mode and wrap it in a framebuffer.
 * On framebuffer creation failure the object reference is dropped and
 * the ERR_PTR is returned to the caller.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
	    intel_framebuffer_pitch_for_width(mode->hdisplay,
					      fb->bits_per_pixel))
		return NULL;

	/* ...and the backing object tall enough for all of its scanlines. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}

/*
 * Fill in the primary-plane state for a load-detect modeset: full-screen
 * at @mode's size when a mode/fb are given, disabled (crtc == NULL)
 * when @fb is NULL.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* Plane source coordinates are 16.16 fixed point. */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}

/*
 * Light up a pipe feeding @connector so the encoder can probe for a
 * display.  On success the state needed to undo the temporary modeset
 * is stashed in @old for intel_release_load_detect_pipe().  Returns
 * true if a pipe was brought up.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL,
				*restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One state to commit now, one to restore the old setup later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	/*
	 * NOTE(review): if this call fails we goto fail without dropping
	 * the fb reference taken above -- looks like a leak; verify.
	 */
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference on the fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Snapshot the pre-modeset state of everything we're touching. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	/* Deadlock during lock acquisition: back off and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/*
 * Undo a modeset performed by intel_get_load_detect_pipe() by
 * committing the saved restore state.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder =
		&intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* Nothing to restore if load detection never committed a state. */
	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
		drm_atomic_state_free(state);
	}
}

/*
 * Reference clock feeding the DPLL for this pipe config: the VBT SSC
 * frequency when spread spectrum is selected, otherwise a fixed
 * per-generation value.
 */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FP0/FP1 hold alternate divisor sets; pick the one in use. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divisors (Pineview uses a different layout). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* i830 has no LVDS register; treat it as all-zero. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}

/* Derive the pixel clock from the link M/N values; 0 if the link N is
 * unprogrammed.
 */
int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precison if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

/* Read out the pipe's dotclock via the PCH FDI link M/N values. */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	/* Hardware timing registers are zero-based; modes are one-based. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}

/* Note GPU activity: take a runtime-PM reference and raise the RPS state. */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}

/* Counterpart to intel_mark_busy(): drop RPS and the runtime-PM reference. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	intel_runtime_pm_put(dev_priv);
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach any pending flip work under the event lock... */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	/* ...then cancel and free it outside the lock. */
	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/*
 * Deferred completion of a page flip: unpin the old framebuffer, drop
 * the flip's object/request references, and signal frontbuffer/FBC now
 * that the new scanout is live.  Runs from a workqueue.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}

/* Complete the pending flip on @crtc, if the hardware has latched it. */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ...
	 */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		spin_unlock_irqrestore(&dev->event_lock, flags);
		return;
	}

	page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/* Flip-done handler entry point, indexed by pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Flip-done handler entry point, indexed by plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? (wrap-safe 32-bit counter comparison) */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* A GPU reset (pending or completed) finishes all outstanding flips. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				    crtc->unpin_work->flip_count);
}

/*
 * Advance the pending flip's state once the hardware reports it has
 * latched (see page_flip_finished()).
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}

static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire. */
	smp_wmb();
}

/*
 * Queue a gen2 CS page flip: wait for any previous flip on the plane,
 * then emit MI_DISPLAY_FLIP with the new surface address.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* Gen3 CS flip: like gen2 but uses the MI_DISPLAY_FLIP_I915 opcode. */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Wait for any previous flip on this plane before queueing ours. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct
	       intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* Gen6 CS flip: tiling mode rides along in the pitch dword. */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *ring = req->ring;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* Base packet is 4 dwords; the RCS path adds the DERRMR LRI+SRM. */
	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message.
	   Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc->unpin_work);
	return 0;
}

/* Decide whether a flip should be done via MMIO rather than the CS. */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	/* The use_mmio_flip module parameter overrides the heuristics. */
	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
#if 0
	else if (obj->base.dma_buf &&
		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
						       false))
		return true;
#endif
	else
		return ring != i915_gem_request_get_ring(obj->last_write_req);
}

/*
 * skl+ MMIO flip: update PLANE_CTL/PLANE_STRIDE and then PLANE_SURF
 * (the former latch on the PLANE_SURF write, making the update atomic).
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum i915_pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			 intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						   fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Pre-skl MMIO flip: refresh the tiling bit in DSPCNTR, then flip DSPSURF. */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}

/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
spin_unlock_irq(&crtc->base.dev->event_lock); 11354 if (work == NULL) 11355 return; 11356 11357 intel_mark_page_flip_active(work); 11358 11359 intel_pipe_update_start(crtc); 11360 11361 if (INTEL_INFO(mmio_flip->i915)->gen >= 9) 11362 skl_do_mmio_flip(crtc, mmio_flip->rotation, work); 11363 else 11364 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 11365 ilk_do_mmio_flip(crtc, work); 11366 11367 intel_pipe_update_end(crtc); 11368 } 11369 11370 static void intel_mmio_flip_work_func(struct work_struct *work) 11371 { 11372 struct intel_mmio_flip *mmio_flip = 11373 container_of(work, struct intel_mmio_flip, work); 11374 #if 0 11375 struct intel_framebuffer *intel_fb = 11376 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); 11377 struct drm_i915_gem_object *obj = intel_fb->obj; 11378 #endif 11379 11380 if (mmio_flip->req) { 11381 WARN_ON(__i915_wait_request(mmio_flip->req, 11382 mmio_flip->crtc->reset_counter, 11383 false, NULL, 11384 &mmio_flip->i915->rps.mmioflips)); 11385 i915_gem_request_unreference__unlocked(mmio_flip->req); 11386 } 11387 11388 /* For framebuffer backed by dmabuf, wait for fence */ 11389 #if 0 11390 if (obj->base.dma_buf) 11391 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, 11392 false, false, 11393 MAX_SCHEDULE_TIMEOUT) < 0); 11394 #endif 11395 11396 intel_do_mmio_flip(mmio_flip); 11397 kfree(mmio_flip); 11398 } 11399 11400 static int intel_queue_mmio_flip(struct drm_device *dev, 11401 struct drm_crtc *crtc, 11402 struct drm_i915_gem_object *obj) 11403 { 11404 struct intel_mmio_flip *mmio_flip; 11405 11406 mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK); 11407 if (mmio_flip == NULL) 11408 return -ENOMEM; 11409 11410 mmio_flip->i915 = to_i915(dev); 11411 mmio_flip->req = i915_gem_request_reference(obj->last_write_req); 11412 mmio_flip->crtc = to_intel_crtc(crtc); 11413 mmio_flip->rotation = crtc->primary->state->rotation; 11414 11415 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11416 
schedule_work(&mmio_flip->work); 11417 11418 return 0; 11419 } 11420 11421 static int intel_default_queue_flip(struct drm_device *dev, 11422 struct drm_crtc *crtc, 11423 struct drm_framebuffer *fb, 11424 struct drm_i915_gem_object *obj, 11425 struct drm_i915_gem_request *req, 11426 uint32_t flags) 11427 { 11428 return -ENODEV; 11429 } 11430 11431 static bool __intel_pageflip_stall_check(struct drm_device *dev, 11432 struct drm_crtc *crtc) 11433 { 11434 struct drm_i915_private *dev_priv = dev->dev_private; 11435 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11436 struct intel_unpin_work *work = intel_crtc->unpin_work; 11437 u32 addr; 11438 11439 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) 11440 return true; 11441 11442 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) 11443 return false; 11444 11445 if (!work->enable_stall_check) 11446 return false; 11447 11448 if (work->flip_ready_vblank == 0) { 11449 if (work->flip_queued_req && 11450 !i915_gem_request_completed(work->flip_queued_req, true)) 11451 return false; 11452 11453 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11454 } 11455 11456 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11457 return false; 11458 11459 /* Potential stall - if we see that the flip has happened, 11460 * assume a missed interrupt. */ 11461 if (INTEL_INFO(dev)->gen >= 4) 11462 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11463 else 11464 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11465 11466 /* There is a potential issue here with a false positive after a flip 11467 * to the same address. We could address this by checking for a 11468 * non-incrementing frame counter. 
	 */
	return addr == work->gtt_offset;
}

/*
 * Per-vblank check for stuck page flips on @pipe: if the pending flip
 * stalled, force-complete it; otherwise boost the GPU if the flip is
 * taking more than one vblank. Expected to run from interrupt context
 * (see the commented-out WARN_ON below).
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

//	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * Legacy (non-atomic) page-flip entry point: validate the new fb against
 * the current one, queue either an MMIO flip or a ring (CS) flip, and
 * arrange for @event to be sent on completion. On -EIO (wedged GPU) the
 * flip is emulated through a synchronous atomic commit (out_hang path).
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe. In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* GPU is terminally wedged: fall through to the atomic fallback. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: don't let unpin work pile up more than one deep. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);
	intel_fbc_pre_update(intel_crtc);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the ring to emit the CS flip on (platform quirks below);
	 * ring == NULL forces an MMIO flip via use_mmio_flip(). */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, ring, &request);
		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, obj);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (!request) {
			request = i915_gem_request_alloc(ring, NULL);
			if (IS_ERR(request)) {
				ret = PTR_ERR(request);
				goto cleanup_unpin;
			}
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req, request);
	}

	if (request)
		i915_add_request_no_flush(request);

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	if (!IS_ERR_OR_NULL(request))
		i915_gem_request_cancel(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

		/* Wedged GPU: emulate the flip with a full atomic commit.
		 * Note out_hang also jumps here from before 'work' exists. */
out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_send_vblank_event(dev, pipe, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}


/**
 * intel_wm_need_update - Check whether watermarks need updating
 * @plane: drm plane
 * @state: new plane state
 *
 * Check current plane state versus the new one to determine whether
 * watermarks need to be recalculated.
 *
 * Returns true or false.
 */
static bool intel_wm_need_update(struct drm_plane *plane,
				 struct drm_plane_state *state)
{
	struct intel_plane_state *new = to_intel_plane_state(state);
	struct intel_plane_state *cur = to_intel_plane_state(plane->state);

	/* Update watermarks on tiling or size changes.
	 */
	if (new->visible != cur->visible)
		return true;

	if (!cur->base.fb || !new->base.fb)
		return false;

	if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
	    cur->base.rotation != new->base.rotation ||
	    drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
	    drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
	    drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
	    drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
		return true;

	return false;
}

/* True when the plane's source size (16.16 fixed point) differs from its
 * destination size, i.e. the plane scaler would be needed. */
static bool needs_scaling(struct intel_plane_state *state)
{
	int src_w = drm_rect_width(&state->src) >> 16;
	int src_h = drm_rect_height(&state->src) >> 16;
	int dst_w = drm_rect_width(&state->dst);
	int dst_h = drm_rect_height(&state->dst);

	return (src_w != dst_w || src_h != dst_h);
}

/*
 * Compare the old and new state of a plane and record the derived flags
 * (wm_changed, disable_cxsr, fb_changed, frontbuffer bits, ...) in the
 * crtc state so the commit phase knows what to update.
 * Returns 0 on success or a negative error code from scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* SKL+: (re)assign a hardware scaler for non-cursor planes. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on || turn_off) {
		pipe_config->wm_changed = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		pipe_config->wm_changed = true;
	}

	if (visible || was_visible)
		intel_crtc->atomic.fb_bits |=
			to_intel_plane(plane)->frontbuffer_bit;

	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		intel_crtc->atomic.post_enable_primary = turn_on;
		intel_crtc->atomic.update_fbc = true;

		break;
	case DRM_PLANE_TYPE_CURSOR:
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		/*
		 * WaCxSRDisabledForSpriteScaling:ivb
		 *
		 * cstate->update_wm was already set above, so this flag will
		 * take effect when we commit and program watermarks.
		 */
		if (IS_IVYBRIDGE(dev) &&
		    needs_scaling(to_intel_plane_state(plane_state)) &&
		    !needs_scaling(old_plane_state))
			pipe_config->disable_lp_wm = true;

		break;
	}
	return 0;
}

/* True when @a and @b may share a crtc (or are the same encoder). */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/* Check that @encoder can be cloned with every other encoder that the new
 * state routes to @crtc. */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder *source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/* Validate that all encoders routed to @crtc in @state are mutually
 * cloneable. */
static bool check_encoder_cloning(struct drm_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (!check_single_encoder_cloning(state, crtc, encoder))
			return false;
	}

	return true;
}

/* Atomic .atomic_check hook for intel crtcs: validates cloning, computes
 * clocks, pipe watermarks and (gen9+) scaler assignments. */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct
intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* A modeset that turns the crtc off still needs a watermark update. */
	if (mode_changed && !crtc_state->active)
		pipe_config->wm_changed = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll != DPLL_ID_PRIVATE)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(intel_crtc, state);
		if (ret)
			return ret;
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}

/* CRTC helper vtable wiring the atomic begin/flush/check hooks. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};

/* Sync each connector's atomic state (best_encoder/crtc) with the legacy
 * encoder pointers currently attached to it. */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}

/* Clamp pipe_config->pipe_bpp to what @connector's sink can accept,
 * based on the EDID-reported bpc (or conservative defaults without it). */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		connector->base.base.id,
		connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to default limit on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0) {
		int type = connector->base.connector_type;
		int clamp_bpp = 24;

		/* Fall back to 18 bpp when DP sink capability is unknown. */
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
		    type == DRM_MODE_CONNECTOR_eDP)
			clamp_bpp = 18;

		if (bpp > clamp_bpp) {
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
				      bpp, clamp_bpp);
			pipe_config->pipe_bpp = clamp_bpp;
		}
	}
}

/*
 * Pick the platform's maximum pipe bpp, store it in pipe_config, then let
 * every connector on this crtc clamp it further. Returns the (pre-clamp)
 * platform maximum.
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}

/* Debug dump of the crtc_* timing fields of @mode. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		mode->crtc_clock,
		mode->crtc_hdisplay, mode->crtc_hsync_start,
		mode->crtc_hsync_end, mode->crtc_htotal,
		mode->crtc_vdisplay, mode->crtc_vsync_start,
		mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}

/* Debug dump of an entire pipe configuration, including per-plane state.
 * @context is a short tag identifying the caller's phase. */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct
drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of M/N values (e.g. for alternate DP link rates). */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL state layout differs per platform generation. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}

/*
 * Reject configurations that route the same digital port to more than
 * one crtc. Returns false on a conflict.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Use the new state if this connector is part of the update,
		 * otherwise fall back to its current state. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through - DDI "unknown" is still a digital port */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fall through */
		default:
			break;
		}
	}

	return true;
}

/* Reset @crtc_state to zero while preserving the handful of fields that
 * must survive (base state, scaler state, dpll selection, pfit force). */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	enum intel_dpll_id shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved.
	 */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof *crtc_state);

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}

/*
 * Compute a full pipe configuration for @crtc from the requested mode:
 * baseline bpp, sync polarity, pipe source size, then let each encoder
 * and the crtc adjust the config, retrying once if the crtc asks for a
 * bandwidth-constrained retry. Returns 0 on success, negative errno on
 * failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode.
	 */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	/* The crtc may request one bandwidth-constrained retry (e.g. at a
	 * lower bpp); a second RETRY indicates a computation loop. */
	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}

/* After a commit, refresh each crtc's cached config pointer and the
 * legacy hwmode used by the vblank code. */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
12424 */ 12425 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) { 12426 struct drm_plane_state *plane_state = crtc->primary->state; 12427 12428 crtc->primary->fb = plane_state->fb; 12429 crtc->x = plane_state->src_x >> 16; 12430 crtc->y = plane_state->src_y >> 16; 12431 } 12432 } 12433 } 12434 12435 static bool intel_fuzzy_clock_check(int clock1, int clock2) 12436 { 12437 int diff; 12438 12439 if (clock1 == clock2) 12440 return true; 12441 12442 if (!clock1 || !clock2) 12443 return false; 12444 12445 diff = abs(clock1 - clock2); 12446 12447 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105) 12448 return true; 12449 12450 return false; 12451 } 12452 12453 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \ 12454 list_for_each_entry((intel_crtc), \ 12455 &(dev)->mode_config.crtc_list, \ 12456 base.head) \ 12457 for_each_if (mask & (1 <<(intel_crtc)->pipe)) 12458 12459 static bool 12460 intel_compare_m_n(unsigned int m, unsigned int n, 12461 unsigned int m2, unsigned int n2, 12462 bool exact) 12463 { 12464 if (m == m2 && n == n2) 12465 return true; 12466 12467 if (exact || !m || !n || !m2 || !n2) 12468 return false; 12469 12470 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX); 12471 12472 if (n > n2) { 12473 while (n > n2) { 12474 m2 <<= 1; 12475 n2 <<= 1; 12476 } 12477 } else if (n < n2) { 12478 while (n < n2) { 12479 m <<= 1; 12480 n <<= 1; 12481 } 12482 } 12483 12484 if (n != n2) 12485 return false; 12486 12487 return intel_fuzzy_clock_check(m, m2); 12488 } 12489 12490 static bool 12491 intel_compare_link_m_n(const struct intel_link_m_n *m_n, 12492 struct intel_link_m_n *m2_n2, 12493 bool adjust) 12494 { 12495 if (m_n->tu == m2_n2->tu && 12496 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, 12497 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) && 12498 intel_compare_m_n(m_n->link_m, m_n->link_n, 12499 m2_n2->link_m, m2_n2->link_n, !adjust)) { 12500 if (adjust) 12501 *m2_n2 = *m_n; 12502 12503 return true; 12504 } 12505 12506 return false; 12507 
} 12508 12509 static bool 12510 intel_pipe_config_compare(struct drm_device *dev, 12511 struct intel_crtc_state *current_config, 12512 struct intel_crtc_state *pipe_config, 12513 bool adjust) 12514 { 12515 bool ret = true; 12516 12517 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \ 12518 do { \ 12519 if (!adjust) \ 12520 DRM_ERROR(fmt, ##__VA_ARGS__); \ 12521 else \ 12522 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \ 12523 } while (0) 12524 12525 #define PIPE_CONF_CHECK_X(name) \ 12526 if (current_config->name != pipe_config->name) { \ 12527 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12528 "(expected 0x%08x, found 0x%08x)\n", \ 12529 current_config->name, \ 12530 pipe_config->name); \ 12531 ret = false; \ 12532 } 12533 12534 #define PIPE_CONF_CHECK_I(name) \ 12535 if (current_config->name != pipe_config->name) { \ 12536 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12537 "(expected %i, found %i)\n", \ 12538 current_config->name, \ 12539 pipe_config->name); \ 12540 ret = false; \ 12541 } 12542 12543 #define PIPE_CONF_CHECK_M_N(name) \ 12544 if (!intel_compare_link_m_n(¤t_config->name, \ 12545 &pipe_config->name,\ 12546 adjust)) { \ 12547 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12548 "(expected tu %i gmch %i/%i link %i/%i, " \ 12549 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12550 current_config->name.tu, \ 12551 current_config->name.gmch_m, \ 12552 current_config->name.gmch_n, \ 12553 current_config->name.link_m, \ 12554 current_config->name.link_n, \ 12555 pipe_config->name.tu, \ 12556 pipe_config->name.gmch_m, \ 12557 pipe_config->name.gmch_n, \ 12558 pipe_config->name.link_m, \ 12559 pipe_config->name.link_n); \ 12560 ret = false; \ 12561 } 12562 12563 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \ 12564 if (!intel_compare_link_m_n(¤t_config->name, \ 12565 &pipe_config->name, adjust) && \ 12566 !intel_compare_link_m_n(¤t_config->alt_name, \ 12567 &pipe_config->name, adjust)) { \ 12568 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12569 "(expected tu %i gmch 
%i/%i link %i/%i, " \ 12570 "or tu %i gmch %i/%i link %i/%i, " \ 12571 "found tu %i, gmch %i/%i link %i/%i)\n", \ 12572 current_config->name.tu, \ 12573 current_config->name.gmch_m, \ 12574 current_config->name.gmch_n, \ 12575 current_config->name.link_m, \ 12576 current_config->name.link_n, \ 12577 current_config->alt_name.tu, \ 12578 current_config->alt_name.gmch_m, \ 12579 current_config->alt_name.gmch_n, \ 12580 current_config->alt_name.link_m, \ 12581 current_config->alt_name.link_n, \ 12582 pipe_config->name.tu, \ 12583 pipe_config->name.gmch_m, \ 12584 pipe_config->name.gmch_n, \ 12585 pipe_config->name.link_m, \ 12586 pipe_config->name.link_n); \ 12587 ret = false; \ 12588 } 12589 12590 /* This is required for BDW+ where there is only one set of registers for 12591 * switching between high and low RR. 12592 * This macro can be used whenever a comparison has to be made between one 12593 * hw state and multiple sw state variables. 12594 */ 12595 #define PIPE_CONF_CHECK_I_ALT(name, alt_name) \ 12596 if ((current_config->name != pipe_config->name) && \ 12597 (current_config->alt_name != pipe_config->name)) { \ 12598 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12599 "(expected %i or %i, found %i)\n", \ 12600 current_config->name, \ 12601 current_config->alt_name, \ 12602 pipe_config->name); \ 12603 ret = false; \ 12604 } 12605 12606 #define PIPE_CONF_CHECK_FLAGS(name, mask) \ 12607 if ((current_config->name ^ pipe_config->name) & (mask)) { \ 12608 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \ 12609 "(expected %i, found %i)\n", \ 12610 current_config->name & (mask), \ 12611 pipe_config->name & (mask)); \ 12612 ret = false; \ 12613 } 12614 12615 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \ 12616 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ 12617 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \ 12618 "(expected %i, found %i)\n", \ 12619 current_config->name, \ 12620 pipe_config->name); \ 12621 ret = false; \ 12622 } 
12623 12624 #define PIPE_CONF_QUIRK(quirk) \ 12625 ((current_config->quirks | pipe_config->quirks) & (quirk)) 12626 12627 PIPE_CONF_CHECK_I(cpu_transcoder); 12628 12629 PIPE_CONF_CHECK_I(has_pch_encoder); 12630 PIPE_CONF_CHECK_I(fdi_lanes); 12631 PIPE_CONF_CHECK_M_N(fdi_m_n); 12632 12633 PIPE_CONF_CHECK_I(has_dp_encoder); 12634 PIPE_CONF_CHECK_I(lane_count); 12635 12636 if (INTEL_INFO(dev)->gen < 8) { 12637 PIPE_CONF_CHECK_M_N(dp_m_n); 12638 12639 if (current_config->has_drrs) 12640 PIPE_CONF_CHECK_M_N(dp_m2_n2); 12641 } else 12642 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12643 12644 PIPE_CONF_CHECK_I(has_dsi_encoder); 12645 12646 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12647 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12648 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start); 12649 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end); 12650 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start); 12651 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end); 12652 12653 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay); 12654 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal); 12655 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start); 12656 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end); 12657 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start); 12658 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end); 12659 12660 PIPE_CONF_CHECK_I(pixel_multiplier); 12661 PIPE_CONF_CHECK_I(has_hdmi_sink); 12662 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 12663 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 12664 PIPE_CONF_CHECK_I(limited_color_range); 12665 PIPE_CONF_CHECK_I(has_infoframe); 12666 12667 PIPE_CONF_CHECK_I(has_audio); 12668 12669 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12670 DRM_MODE_FLAG_INTERLACE); 12671 12672 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) { 12673 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12674 DRM_MODE_FLAG_PHSYNC); 12675 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12676 
DRM_MODE_FLAG_NHSYNC); 12677 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12678 DRM_MODE_FLAG_PVSYNC); 12679 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags, 12680 DRM_MODE_FLAG_NVSYNC); 12681 } 12682 12683 PIPE_CONF_CHECK_X(gmch_pfit.control); 12684 /* pfit ratios are autocomputed by the hw on gen4+ */ 12685 if (INTEL_INFO(dev)->gen < 4) 12686 PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios); 12687 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits); 12688 12689 if (!adjust) { 12690 PIPE_CONF_CHECK_I(pipe_src_w); 12691 PIPE_CONF_CHECK_I(pipe_src_h); 12692 12693 PIPE_CONF_CHECK_I(pch_pfit.enabled); 12694 if (current_config->pch_pfit.enabled) { 12695 PIPE_CONF_CHECK_X(pch_pfit.pos); 12696 PIPE_CONF_CHECK_X(pch_pfit.size); 12697 } 12698 12699 PIPE_CONF_CHECK_I(scaler_state.scaler_id); 12700 } 12701 12702 /* BDW+ don't expose a synchronous way to read the state */ 12703 if (IS_HASWELL(dev)) 12704 PIPE_CONF_CHECK_I(ips_enabled); 12705 12706 PIPE_CONF_CHECK_I(double_wide); 12707 12708 PIPE_CONF_CHECK_X(ddi_pll_sel); 12709 12710 PIPE_CONF_CHECK_I(shared_dpll); 12711 PIPE_CONF_CHECK_X(dpll_hw_state.dpll); 12712 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md); 12713 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 12714 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 12715 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 12716 PIPE_CONF_CHECK_X(dpll_hw_state.spll); 12717 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1); 12718 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1); 12719 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2); 12720 12721 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 12722 PIPE_CONF_CHECK_I(pipe_bpp); 12723 12724 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock); 12725 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock); 12726 12727 #undef PIPE_CONF_CHECK_X 12728 #undef PIPE_CONF_CHECK_I 12729 #undef PIPE_CONF_CHECK_I_ALT 12730 #undef PIPE_CONF_CHECK_FLAGS 12731 #undef PIPE_CONF_CHECK_CLOCK_FUZZY 12732 #undef PIPE_CONF_QUIRK 12733 #undef INTEL_ERR_OR_DBG_KMS 12734 12735 return ret; 12736 } 12737 12738 static void 
/* Verify the gen9+ (SKL) DDB allocation in hardware matches the software
 * watermark tracking, for every plane and cursor on every active crtc. */
check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* DDB allocation only exists on gen9+ */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum i915_pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
		sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}

/* Verify that each connector in @old_state is consistent: its hw state
 * matches sw tracking and its atomic encoder equals the legacy encoder. */
static void
check_connector_state(struct drm_device *dev,
		      struct drm_atomic_state *old_state)
{
	struct drm_connector_state *old_conn_state;
	struct drm_connector *connector;
	int i;

	for_each_connector_in_state(old_state, connector, old_conn_state, i) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}

/* Verify each encoder's enabled/crtc tracking against its connectors and
 * against the hardware state reported by ->get_hw_state(). */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* An encoder is enabled iff some connector routes to it. */
		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}

/* For every crtc touched by @old_state that underwent a modeset or pipe
 * update, read the hardware pipe config back and compare it against the
 * committed software state, warning on any mismatch. */
static void
check_crtc_state(struct drm_device *dev, struct drm_atomic_state *old_state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	int i;

	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_crtc_state *pipe_config, *sw_config;
		bool active;

		if (!needs_modeset(crtc->state) &&
		    !to_intel_crtc_state(crtc->state)->update_pipe)
			continue;

		/* Reuse the old state's storage as scratch space for the
		 * hardware readout. */
		__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
		pipe_config = to_intel_crtc_state(old_crtc_state);
		memset(pipe_config, 0, sizeof(*pipe_config));
		pipe_config->base.crtc = crtc;
		pipe_config->base.state = old_state;

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.id);

		active = dev_priv->display.get_pipe_config(intel_crtc,
							   pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->state->active;

		I915_STATE_WARN(crtc->state->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->state->active, active);

		I915_STATE_WARN(intel_crtc->active != crtc->state->active,
		     "transitional active state does not match atomic hw state "
		     "(expected %i, found %i)\n", crtc->state->active, intel_crtc->active);

		for_each_encoder_on_crtc(dev, crtc, encoder) {
			enum i915_pipe pipe;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active != crtc->state->active,
				"[ENCODER:%i] active %i with crtc active %i\n",
				encoder->base.base.id, active, crtc->state->active);

			I915_STATE_WARN(active && intel_crtc->pipe != pipe,
					"Encoder connected to wrong pipe %c\n",
					pipe_name(pipe));

			if (active)
				encoder->get_config(encoder, pipe_config);
		}

		if (!crtc->state->active)
			continue;

		sw_config = to_intel_crtc_state(crtc->state);
		if (!intel_pipe_config_compare(dev, sw_config,
					       pipe_config, false)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(intel_crtc, pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(intel_crtc, sw_config,
					       "[sw state]");
		}
	}
}

static void
/* Cross-check each shared DPLL's software bookkeeping (active count,
 * crtc_mask, on flag, cached hw state) against the crtcs and the hardware. */
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* Recount users directly from the crtcs. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

/* Run all hardware/software state cross-checks after a commit. */
static void
intel_modeset_check_state(struct drm_device *dev,
			  struct drm_atomic_state *old_state)
{
	check_wm_state(dev);
	check_connector_state(dev, old_state);
	check_encoder_state(dev);
	check_crtc_state(dev, old_state);
	check_shared_dpll_state(dev);
}

void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}

/* Compute the per-platform offset between the hw scanline counter and the
 * logical scanline, cached in crtc->scanline_offset. */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/* For every crtc in @state undergoing a modeset, drop its reference on the
 * previously used shared DPLL in the atomic DPLL state. */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		int old_dpll = to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = DPLL_ID_PRIVATE;

		if (old_dpll == DPLL_ID_PRIVATE)
			continue;

		/* Fetch the dpll state lazily, only when a crtc actually
		 * releases a pll. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		shared_dpll[old_dpll].crtc_mask &= ~(1 << intel_crtc->pipe);
	}
}

/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum i915_pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}

/* Force a modeset on every currently-active pipe by pulling it (plus its
 * connectors and planes) into @state.  Used when cdclk must change. */
static int intel_modeset_all_pipes(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;

	/* add all active pipes to the state */
	for_each_crtc(state->dev, crtc) {
		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->active || needs_modeset(crtc_state))
			continue;

		crtc_state->mode_changed = true;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			break;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			break;
	}

	return ret;
}

/* Global (non-per-crtc) validation for a modeset: digital port conflicts,
 * active-crtc mask bookkeeping, cdclk recomputation, shared PLL release and
 * the HSW planes workaround.  Returns 0 or a negative errno. */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		ret = dev_priv->display.modeset_calc_cdclk(state);

		/* A cdclk change affects every pipe, so all must modeset. */
		if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}

/*
 * Handle calculation of various watermark data at the end of the atomic check
 * phase. The code here should be run after the per-crtc and per-plane 'check'
 * handlers to ensure that all derived state has been updated.
 */
static void calc_watermark_data(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_plane *plane;
	struct drm_plane_state *pstate;

	/*
	 * Calculate watermark configuration details now that derived
	 * plane/crtc state is all properly updated.
	 */
	drm_for_each_crtc(crtc, dev) {
		/* Prefer the state from this update, else the current one. */
		cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
			 crtc->state;

		if (cstate->active)
			intel_state->wm_config.num_pipes_active++;
	}
	drm_for_each_legacy_plane(plane, dev) {
		pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
			 plane->state;

		if (!to_intel_plane_state(pstate)->visible)
			continue;

		intel_state->wm_config.sprites_enabled = true;
		if (pstate->crtc_w != pstate->src_w >> 16 ||
		    pstate->crtc_h != pstate->src_h >> 16)
			intel_state->wm_config.sprites_scaled = true;
	}
}

/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		memset(&to_intel_crtc(crtc)->atomic, 0,
		       sizeof(struct intel_crtc_atomic_commit));

		/* Catch
I915_MODE_FLAG_INHERITED */ 13276 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13277 crtc_state->mode_changed = true; 13278 13279 if (!crtc_state->enable) { 13280 if (needs_modeset(crtc_state)) 13281 any_ms = true; 13282 continue; 13283 } 13284 13285 if (!needs_modeset(crtc_state)) 13286 continue; 13287 13288 /* FIXME: For only active_changed we shouldn't need to do any 13289 * state recomputation at all. */ 13290 13291 ret = drm_atomic_add_affected_connectors(state, crtc); 13292 if (ret) 13293 return ret; 13294 13295 ret = intel_modeset_pipe_config(crtc, pipe_config); 13296 if (ret) 13297 return ret; 13298 13299 if (i915.fastboot && 13300 intel_pipe_config_compare(dev, 13301 to_intel_crtc_state(crtc->state), 13302 pipe_config, true)) { 13303 crtc_state->mode_changed = false; 13304 to_intel_crtc_state(crtc_state)->update_pipe = true; 13305 } 13306 13307 if (needs_modeset(crtc_state)) { 13308 any_ms = true; 13309 13310 ret = drm_atomic_add_affected_planes(state, crtc); 13311 if (ret) 13312 return ret; 13313 } 13314 13315 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13316 needs_modeset(crtc_state) ? 
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		/* No modeset: cdclk stays at the current frequency. */
		intel_state->cdclk = dev_priv->cdclk_freq;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	calc_watermark_data(state);

	return 0;
}

/*
 * Prepare the atomic state for committing: wait for pending flips, pin the
 * new framebuffers, and wait (outside struct_mutex) for outstanding GPU
 * rendering to the new scanout buffers. Async commit is not supported.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool async)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	if (async) {
		DRM_DEBUG_KMS("i915 does not yet support async commit\n");
		return -EINVAL;
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Cursor-only updates skip the flip wait entirely. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* Throttle: drain the unpin workqueue if flips pile up. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (!ret && !async && !i915_reset_in_progress(&dev_priv->gpu_error)) {
		u32 reset_counter;

		/* Sample the reset counter, then wait without the lock held. */
		reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
		mutex_unlock(&dev->struct_mutex);

		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  reset_counter, true,
						  NULL, NULL);

			/* Swallow -EIO errors to allow
updates during hw lockup. */
			if (ret == -EIO)
				ret = 0;

			if (ret)
				break;
		}

		if (!ret)
			return 0;

		/* A wait failed: retake the lock and unpin what we prepared. */
		mutex_lock(&dev->struct_mutex);
		drm_atomic_helper_cleanup_planes(dev, state);
	}

	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Wait for one vblank on every pipe in @crtc_mask. Vblank counts are
 * sampled first for all pipes, then each pipe is waited on, so the waits
 * overlap rather than accumulate.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum i915_pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			/* Can't enable vblank irqs; drop this pipe from the wait. */
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		/* 50ms is well over one frame at any plausible refresh rate. */
		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN_ON(!lret);

		drm_crtc_vblank_put(crtc);
	}
}

/* Decide whether the commit must wait for vblank before finishing. */
static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
{
	/* fb updated, need to unpin old fb */
	if (crtc_state->fb_changed)
		return true;

	/* wm changes, need vblank before final wm's */
	if (crtc_state->wm_changed)
		return true;

	/*
	 * cxsr is re-enabled after vblank.
	 * This is already handled by crtc_state->wm_changed,
	 * but added for clarity.
	 */
	if (crtc_state->disable_cxsr)
		return true;

	return false;
}

/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @async: asynchronous commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * we can only handle plane-related operations and do not yet support
 * asynchronous commit.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool async)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *crtc_state;
	struct drm_crtc *crtc;
	int ret = 0, i;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;

	ret = intel_atomic_prepare_commit(dev, state, async);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* From here on the new state is committed; swap it in. */
	drm_atomic_helper_swap_state(dev, state);
	dev_priv->wm.config = to_intel_atomic_state(state)->wm_config;

	if (intel_state->modeset) {
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		/* Held across the whole modeset; released near the end. */
		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* Disable pass: shut down every pipe that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		/* crtc_state here is the old state: disable if it was active. */
		if (crtc_state->active) {
			intel_crtc_disable_planes(crtc, crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			/* Pipe stays off in the new state: settle watermarks now. */
			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		intel_shared_dpll_commit(state);

		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		if (dev_priv->display.modeset_commit_cdclk &&
		    intel_state->dev_cdclk != dev_priv->cdclk_freq)
			dev_priv->display.modeset_commit_cdclk(state);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up.
	 */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);
		bool update_pipe = !modeset && pipe_config->update_pipe;

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Full modesets already ran this in the disable pass above. */
		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(crtc_state));

		if (crtc->state->active && intel_crtc->atomic.update_fbc)
			intel_fbc_enable(intel_crtc);

		if (crtc->state->active &&
		    (crtc->state->planes_changed || update_pipe))
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);

		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: add subpixel order */

	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_post_plane_update(to_intel_crtc(crtc));

		/* Balance the get in the disable pass. */
		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);
	}

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	if (hw_check)
		intel_modeset_check_state(dev, state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);

	return 0;
}

/*
 * Force a full modeset on @crtc by committing its current state with
 * mode_changed set. Used to restore a mode the hardware lost track of.
 * Failures (other than deadlock backoff) are logged-and-forgotten since
 * there is no caller to report them to.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Inactive crtc: nothing to restore, just free the state. */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/* NOTE: the label sits inside the if on purpose — the state is freed
	 * both on error and via the goto above; on success drm_atomic_commit
	 * consumed it. */
	if (ret)
out:
		drm_atomic_state_free(state);
}

#undef for_each_intel_crtc_masked

static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = intel_crtc_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};

/*
 * Read back the PCH DPLL registers; returns true iff the VCO is enabled.
 * Skips the read (returns false) when the PLL power domain is off.
 */
static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
				      struct intel_shared_dpll *pll,
				      struct intel_dpll_hw_state *hw_state)
{
	uint32_t val;

	if (!intel_display_power_get_if_enabled(dev_priv,
						POWER_DOMAIN_PLLS))
		return false;

	val = I915_READ(PCH_DPLL(pll->id));
	hw_state->dpll = val;
	hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
	hw_state->fp1 = I915_READ(PCH_FP1(pll->id));

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);

	return val & DPLL_VCO_ENABLE;
}

/* Program the FP0/FP1 dividers; the DPLL itself is written at enable time. */
static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll *pll)
{
	I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
	I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
}

/* Enable the PCH DPLL. Note the double write: see comment below. */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Disable the PCH DPLL after asserting no transcoder still uses it. */
static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
				 struct intel_shared_dpll *pll)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	/* Make sure no transcoder is still depending on us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (intel_crtc_to_shared_dpll(crtc) == pll)
			assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
	}

	I915_WRITE(PCH_DPLL(pll->id), 0);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}

/* Human-readable PLL names, indexed by pll->id.
 * NOTE(review): could be "static const char * const" if pll->name permits. */
static char *ibx_pch_dpll_names[] = {
	"PCH DPLL A",
	"PCH DPLL B",
};

/* Register the two IBX/CPT PCH shared DPLLs and their vfuncs. */
static void ibx_pch_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	dev_priv->num_shared_dpll = 2;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		dev_priv->shared_dplls[i].id = i;
		dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
		dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
		dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
		dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
		dev_priv->shared_dplls[i].get_hw_state =
			ibx_pch_dpll_get_hw_state;
	}
}

/* Pick the platform-appropriate shared-DPLL setup (DDI, PCH, or none). */
static void intel_shared_dpll_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_DDI(dev))
		intel_ddi_pll_init(dev);
	else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ibx_pch_dpll_init(dev);
	else
		dev_priv->num_shared_dpll = 0;

	BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
}

/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @fb: framebuffer to prepare for presentation
 *
 * Prepares a framebuffer for usage on a display plane.  Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits.  Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	int ret = 0;

	/* Nothing to pin and nothing to unpin. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);

		/* Swallow -EIO errors to allow updates during hw lockup. */
		if (ret && ret != -EIO)
			return ret;
	}

	/* For framebuffer backed by dmabuf, wait for fence */
#if 0
	if (obj && obj->base.dma_buf) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							   false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}
#endif

	if (!obj) {
		ret = 0;
	} else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
		   INTEL_INFO(dev)->cursor_needs_physical) {
		/* Old platforms scan the cursor out of a physical allocation. */
		int align = IS_I830(dev) ?
16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state);
	}

	if (ret == 0) {
		if (obj) {
			struct intel_plane_state *plane_state =
				to_intel_plane_state(new_state);

			/* Remember the request we must wait on before flipping. */
			i915_gem_request_assign(&plane_state->wait_req,
						obj->last_write_req);
		}

		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
	}

	return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @fb: old framebuffer that was on plane
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 *
 * Must be called with struct_mutex held.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
		       struct drm_plane_state *old_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	struct intel_plane_state *old_intel_state;
	struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
	struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);

	old_intel_state = to_intel_plane_state(old_state);

	if (!obj && !old_obj)
		return;

	/* Physical-cursor objects were never GTT-pinned, so skip unpin. */
	if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
			!INTEL_INFO(dev)->cursor_needs_physical))
		intel_unpin_fb_obj(old_state->fb, old_state);

	/* prepare_fb aborted?
 */
	if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
	    (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);

	/* Drop the request reference taken in prepare_fb. */
	i915_gem_request_assign(&old_intel_state->wait_req, NULL);

}

/*
 * Compute the maximum downscale factor (16.16 fixed point) usable on SKL,
 * limited both by the hardware's ~3x cap and by cdclk/pixel-clock headroom.
 */
int
skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
{
	int max_scale;
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	int crtc_clock, cdclk;

	if (!intel_crtc || !crtc_state->base.enable)
		return DRM_PLANE_HELPER_NO_SCALING;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;
	crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
	cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;

	if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
		return DRM_PLANE_HELPER_NO_SCALING;

	/*
	 * skl max scale is lower of:
	 *    close to 3 but not 3, -1 is for that purpose
	 *            or
	 *    cdclk/crtc_clock
	 */
	max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));

	return max_scale;
}

/* Validate a primary-plane update; gen9+ may use a scaler when no colorkey. */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_crtc_state *crtc_state,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	bool can_position = false;

	if (INTEL_INFO(plane->dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
		}
		can_position = true;
	}

	return drm_plane_helper_check_update(plane, crtc, fb,
					     &state->src,
					     &state->dst, &state->clip,
					     min_scale, max_scale,
					     can_position, true,
					     &state->visible);
}

/* Start vblank evasion; for fastsets also reprogram pipe config/scalers. */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets program the pipe elsewhere; nothing more to do here. */
	if (modeset)
		return;

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}

/* End the vblank-evasion critical section started in begin_crtc_commit. */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_pipe_update_end(intel_crtc);
}

/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate and initialize the primary plane for @pipe, selecting the
 * per-generation format list and update/disable vfuncs. Returns NULL on
 * allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* gen2/3 FBC: plane/pipe are cross-wired (see intel_crtc_init). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY, NULL);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}

/*
 * Lazily create the device-wide rotation property (0/180, plus 90/270 on
 * gen9+) and attach it to @plane.
 */
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
	if (!dev->mode_config.rotation_property) {
		unsigned long flags = BIT(DRM_ROTATE_0) |
				      BIT(DRM_ROTATE_180);

		if (INTEL_INFO(dev)->gen >= 9)
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);

		dev->mode_config.rotation_property =
			drm_mode_create_rotation_property(dev, flags);
	}
	if (dev->mode_config.rotation_property)
		drm_object_attach_property(&plane->base.base,
					   dev->mode_config.rotation_property,
					   plane->base.state->rotation);
}

/* Validate a cursor update: size, stride, tiling, and CHV pipe C quirk. */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc =
crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum i915_pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Cursor stride is width rounded to a power of two, 4 bytes/pixel. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse to put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}

/* Turn the cursor off by programming a NULL state. */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	intel_crtc->cursor_addr = 0;
	intel_crtc_update_cursor(crtc, NULL);
}

/* Program the cursor base address (GGTT or physical) and update it. */
static void
intel_update_cursor_plane(struct drm_plane *plane,
			  const struct intel_crtc_state *crtc_state,
			  const struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = plane->dev;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc_update_cursor(crtc, state);
}

/*
 * Allocate and initialize the cursor plane for @pipe.  Returns NULL on
 * allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane =
intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR, NULL);

	/* Cursor rotation: only 0/180 regardless of generation. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
						dev->mode_config.rotation_property,
						state->base.rotation);
	}

	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}

/* Mark all of the crtc's scalers unused and reset the crtc scaler id. */
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state)
{
	int i;
	struct intel_scaler *intel_scaler;
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;

	for (i = 0; i < intel_crtc->num_scalers; i++) {
		intel_scaler = &scaler_state->scalers[i];
		intel_scaler->in_use = 0;
		intel_scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}

/*
 * Allocate and register the crtc for @pipe together with its primary and
 * cursor planes; on failure everything allocated so far is torn down.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has one scaler on SKL; A and B have SKL_NUM_SCALERS. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs, NULL);
	if (ret)
		goto fail;

	/* Identity gamma ramp by default. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
14332 */ 14333 intel_crtc->pipe = pipe; 14334 intel_crtc->plane = pipe; 14335 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) { 14336 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); 14337 intel_crtc->plane = !pipe; 14338 } 14339 14340 intel_crtc->cursor_base = ~0; 14341 intel_crtc->cursor_cntl = ~0; 14342 intel_crtc->cursor_size = ~0; 14343 14344 intel_crtc->wm.cxsr_allowed = true; 14345 14346 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || 14347 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); 14348 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 14349 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 14350 14351 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 14352 14353 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 14354 return; 14355 14356 fail: 14357 if (primary) 14358 drm_plane_cleanup(primary); 14359 if (cursor) 14360 drm_plane_cleanup(cursor); 14361 kfree(crtc_state); 14362 kfree(intel_crtc); 14363 } 14364 14365 enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector) 14366 { 14367 struct drm_encoder *encoder = connector->base.encoder; 14368 struct drm_device *dev = connector->base.dev; 14369 14370 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 14371 14372 if (!encoder || WARN_ON(!encoder->crtc)) 14373 return INVALID_PIPE; 14374 14375 return to_intel_crtc(encoder->crtc)->pipe; 14376 } 14377 14378 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 14379 struct drm_file *file) 14380 { 14381 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 14382 struct drm_crtc *drmmode_crtc; 14383 struct intel_crtc *crtc; 14384 14385 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14386 14387 if (!drmmode_crtc) { 14388 DRM_ERROR("no such CRTC id\n"); 14389 return -ENOENT; 14390 } 14391 14392 crtc = to_intel_crtc(drmmode_crtc); 14393 pipe_from_crtc_id->pipe = crtc->pipe; 14394 14395 
	return 0;
}

/*
 * intel_encoder_clones - bitmask of encoder indices that may be active
 * simultaneously with @encoder (used for possible_clones).  The bit
 * position is the encoder's enumeration order, which matches the order
 * drm assigned encoder indices in.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/*
 * has_edp_a - is eDP on port A present?  Mobile-only; the DP_A strap must
 * be set, and on gen5 the fuse may additionally disable it.
 */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * intel_crt_present - whether an analog VGA (CRT) connector should be
 * registered.  Rules out platforms/SKUs that dropped the DAC, fused-off
 * parts, DDI A consuming DDI E's lanes, and VBTs that disclaim CRT.
 */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * intel_setup_outputs - probe and register every display output.
 *
 * Each platform family has its own detection strategy: DDI straps on
 * HSW+/SKL, PCH strap registers on ILK-IVB, latched DDC state plus VBT
 * on VLV/CHV, and legacy SDVO/DP probing on gen3/4.  The probe order
 * matters (e.g. SDVO before HDMI on shared pins), so keep it as-is.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D is HDMI unless VBT says it's an eDP panel. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that every encoder exists, fill in the cloning/crtc masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/*
 * Framebuffer destructor: drops the GEM object's framebuffer reference
 * (with an underflow check) and frees the wrapper.  struct_mutex guards
 * framebuffer_references and the GEM unreference.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/*
 * GETFB handle export.  Userptr-backed objects are refused: handing out
 * a handle would let another process access arbitrary user memory.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (obj->userptr.mm) {
		DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
		return -EINVAL;
	}

	return drm_gem_handle_create(file, &obj->base, handle);
}

/*
 * DIRTYFB ioctl: frontbuffer was written by the CPU; flush tracking so
 * FBC/PSR notice.  Clip rects are ignored — the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};

/*
 * intel_fb_pitch_limit - maximum legal stride in bytes for a framebuffer
 * of the given tiling modifier and pixel format on this hardware.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the of the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}

/*
 * intel_framebuffer_init - validate a userspace ADDFB(2) request against
 * hardware limits and bind @obj to @intel_fb.
 *
 * Checks, in order: fb modifier vs. GEM tiling mode consistency, modifier
 * support for this gen, stride alignment, pitch limit, X-tile stride
 * match, pixel-format support per gen, zero offset, and object size.
 * Returns 0 on success or -EINVAL/-errno; takes a framebuffer_references
 * count on the object on success (dropped in the fb destructor).
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* NOTE(review): modifier[0] is 64-bit; "%lx" matches only on
		 * LP64 — upstream uses "%llx" here. Confirm for this port. */
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%lx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is fine on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%lx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout requires the fb pitch to equal the fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}

/*
 * .fb_create hook: look up the GEM object named by userspace and wrap it
 * in an intel framebuffer.  On failure the lookup reference is dropped.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd.handles[0]));
	/* NOTE(review): relies on 'base' being the first member of the GEM
	 * object so a NULL lookup yields &obj->base == NULL — confirm. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}

#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No fbdev emulation: poll-changed becomes a no-op. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL search strategy per platform family. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* CRTC enable/disable, clock computation and state readout hooks. */
	if (INTEL_INFO(dev)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev) || IS_BROADWATER(dev) ||
		 IS_GEN6(dev) || IS_IVYBRIDGE(dev))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev) || IS_G4X(dev))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training and cdclk reprogramming hooks. */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
		if (IS_BROADWELL(dev)) {
			dev_priv->display.modeset_commit_cdclk =
				broadwell_modeset_commit_cdclk;
			dev_priv->display.modeset_calc_cdclk =
				broadwell_modeset_calc_cdclk;
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev)) {
		dev_priv->display.modeset_commit_cdclk =
			broxton_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broxton_modeset_calc_cdclk;
	}

	/* Page-flip queueing implementation per generation. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	/* DragonFly lockmgr lock in place of Linux's mutex_init(). */
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* Same as above, for pipe B. */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* PCI-id keyed quirk entry; PCI_ANY_ID wildcards the subsystem fields. */
struct intel_quirk {
	int device;
	int subsystem_vendor;
	int subsystem_device;
	void (*hook)(struct drm_device *dev);
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};

/*
 * intel_init_quirks - apply every PCI-id and DMI quirk that matches the
 * running machine.  Hooks only set flag bits, so multiple may fire.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	/* Linux VGA arbiter calls, not wired up on this port. */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Set SR01 bit 5 (screen off) via the VGA sequencer ports.
	 * NOTE(review): outb(port, value) argument order is the DragonFly
	 * convention — opposite of Linux; confirm against the port's io.h. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/*
 * intel_modeset_init_hw - hardware (re)initialization shared by driver
 * load and resume: refresh cdclk bookkeeping, clock gating, and RPS.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_update_cdclk(dev);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev);
}

/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.program_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
15320 */ 15321 WARN(true, "Could not determine valid watermarks for inherited state\n"); 15322 goto fail; 15323 } 15324 15325 /* Write calculated watermark values back */ 15326 to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config; 15327 for_each_crtc_in_state(state, crtc, cstate, i) { 15328 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 15329 15330 dev_priv->display.program_watermarks(cs); 15331 } 15332 15333 drm_atomic_state_free(state); 15334 fail: 15335 drm_modeset_drop_locks(&ctx); 15336 drm_modeset_acquire_fini(&ctx); 15337 } 15338 15339 void intel_modeset_init(struct drm_device *dev) 15340 { 15341 struct drm_i915_private *dev_priv = dev->dev_private; 15342 int sprite, ret; 15343 enum i915_pipe pipe; 15344 struct intel_crtc *crtc; 15345 15346 drm_mode_config_init(dev); 15347 15348 dev->mode_config.min_width = 0; 15349 dev->mode_config.min_height = 0; 15350 15351 dev->mode_config.preferred_depth = 24; 15352 dev->mode_config.prefer_shadow = 1; 15353 15354 dev->mode_config.allow_fb_modifiers = true; 15355 15356 dev->mode_config.funcs = &intel_mode_funcs; 15357 15358 intel_init_quirks(dev); 15359 15360 intel_init_pm(dev); 15361 15362 if (INTEL_INFO(dev)->num_pipes == 0) 15363 return; 15364 15365 /* 15366 * There may be no VBT; and if the BIOS enabled SSC we can 15367 * just keep using it to avoid unnecessary flicker. Whereas if the 15368 * BIOS isn't using it, don't assume it will work even if the VBT 15369 * indicates as much. 15370 */ 15371 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { 15372 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 15373 DREF_SSC1_ENABLE); 15374 15375 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { 15376 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", 15377 bios_lvds_use_ssc ? "en" : "dis", 15378 dev_priv->vbt.lvds_use_ssc ? 
"en" : "dis"); 15379 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; 15380 } 15381 } 15382 15383 intel_init_display(dev); 15384 intel_init_audio(dev); 15385 15386 if (IS_GEN2(dev)) { 15387 dev->mode_config.max_width = 2048; 15388 dev->mode_config.max_height = 2048; 15389 } else if (IS_GEN3(dev)) { 15390 dev->mode_config.max_width = 4096; 15391 dev->mode_config.max_height = 4096; 15392 } else { 15393 dev->mode_config.max_width = 8192; 15394 dev->mode_config.max_height = 8192; 15395 } 15396 15397 if (IS_845G(dev) || IS_I865G(dev)) { 15398 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512; 15399 dev->mode_config.cursor_height = 1023; 15400 } else if (IS_GEN2(dev)) { 15401 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 15402 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 15403 } else { 15404 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 15405 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 15406 } 15407 15408 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 15409 15410 DRM_DEBUG_KMS("%d display pipe%s available.\n", 15411 INTEL_INFO(dev)->num_pipes, 15412 INTEL_INFO(dev)->num_pipes > 1 ? 
"s" : ""); 15413 15414 for_each_pipe(dev_priv, pipe) { 15415 intel_crtc_init(dev, pipe); 15416 for_each_sprite(dev_priv, pipe, sprite) { 15417 ret = intel_plane_init(dev, pipe, sprite); 15418 if (ret) 15419 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 15420 pipe_name(pipe), sprite_name(pipe, sprite), ret); 15421 } 15422 } 15423 15424 intel_update_czclk(dev_priv); 15425 intel_update_cdclk(dev); 15426 15427 intel_shared_dpll_init(dev); 15428 15429 /* Just disable it once at startup */ 15430 i915_disable_vga(dev); 15431 intel_setup_outputs(dev); 15432 15433 drm_modeset_lock_all(dev); 15434 intel_modeset_setup_hw_state(dev); 15435 drm_modeset_unlock_all(dev); 15436 15437 for_each_intel_crtc(dev, crtc) { 15438 struct intel_initial_plane_config plane_config = {}; 15439 15440 if (!crtc->active) 15441 continue; 15442 15443 /* 15444 * Note that reserving the BIOS fb up front prevents us 15445 * from stuffing other stolen allocations like the ring 15446 * on top. This prevents some ugliness at boot time, and 15447 * can even allow for smooth boot transitions if the BIOS 15448 * fb is large enough for the active pipe configuration. 15449 */ 15450 dev_priv->display.get_initial_plane_config(crtc, 15451 &plane_config); 15452 15453 /* 15454 * If the fb is shared between multiple heads, we'll 15455 * just get the first one. 15456 */ 15457 intel_find_initial_plane_obj(crtc, &plane_config); 15458 } 15459 15460 /* 15461 * Make sure hardware watermarks really match the state we read out. 15462 * Note that we need to do this after reconstructing the BIOS fb's 15463 * since the watermark calculation done here will use pstate->fb. 
15464 */ 15465 sanitize_watermarks(dev); 15466 } 15467 15468 static void intel_enable_pipe_a(struct drm_device *dev) 15469 { 15470 struct intel_connector *connector; 15471 struct drm_connector *crt = NULL; 15472 struct intel_load_detect_pipe load_detect_temp; 15473 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 15474 15475 /* We can't just switch on the pipe A, we need to set things up with a 15476 * proper mode and output configuration. As a gross hack, enable pipe A 15477 * by enabling the load detect pipe once. */ 15478 for_each_intel_connector(dev, connector) { 15479 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 15480 crt = &connector->base; 15481 break; 15482 } 15483 } 15484 15485 if (!crt) 15486 return; 15487 15488 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) 15489 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 15490 } 15491 15492 static bool 15493 intel_check_plane_mapping(struct intel_crtc *crtc) 15494 { 15495 struct drm_device *dev = crtc->base.dev; 15496 struct drm_i915_private *dev_priv = dev->dev_private; 15497 u32 val; 15498 15499 if (INTEL_INFO(dev)->num_pipes == 1) 15500 return true; 15501 15502 val = I915_READ(DSPCNTR(!crtc->plane)); 15503 15504 if ((val & DISPLAY_PLANE_ENABLE) && 15505 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 15506 return false; 15507 15508 return true; 15509 } 15510 15511 static bool intel_crtc_has_encoders(struct intel_crtc *crtc) 15512 { 15513 struct drm_device *dev = crtc->base.dev; 15514 struct intel_encoder *encoder; 15515 15516 for_each_encoder_on_crtc(dev, &crtc->base, encoder) 15517 return true; 15518 15519 return false; 15520 } 15521 15522 static bool intel_encoder_has_connectors(struct intel_encoder *encoder) 15523 { 15524 struct drm_device *dev = encoder->base.dev; 15525 struct intel_connector *connector; 15526 15527 for_each_connector_on_encoder(dev, &encoder->base, connector) 15528 return true; 15529 15530 return false; 15531 } 15532 15533 
/*
 * Bring a single crtc's software state in sync with (possibly bogus)
 * hardware state inherited from the BIOS: fix frame-start delays, plane
 * mapping, the pipe A quirk, and dangling encoder links.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPECONF(crtc->config->cpu_transcoder);

	/* Clear any frame start delays used for debugging left by the BIOS */
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		/* bool is sufficient here: gen < 4 has at most two planes. */
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on.
		 */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (!intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active != crtc->base.state->active) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because of calls to intel_crtc_disable_noatomic,
		 * or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, NULL) < 0);
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;
		crtc->base.state->connector_mask = 0;
		crtc->base.state->encoder_mask = 0;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder)
			encoder->base.crtc = NULL;
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

/*
 * Fix up an encoder whose read-out state is inconsistent: active
 * connectors but no active pipe means the encoder must be shut down and
 * its connector links clamped to off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup.
	 */
}

/* Re-disable the VGA plane if something (e.g. the BIOS on resume) turned
 * it back on. Caller must ensure the VGA power domain is powered. */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}

/* Read whether the primary plane is enabled straight from the hardware. */
static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}

/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	/* The plane only counts as visible if its crtc is active too. */
	plane_state->visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}

static void
/* Reconstruct the driver's software state (crtcs, shared DPLLs, encoders,
 * connectors) from the current hardware state, typically the state the
 * BIOS left behind. */
intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		/* Wipe the stale state before filling it from the hardware. */
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (IS_BROADWELL(dev_priv)) {
				pixclk = ilk_pipe_pixel_rate(crtc_state);

				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
				if (crtc_state->ips_enabled)
					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
			} else if (IS_VALLEYVIEW(dev_priv) ||
				   IS_CHERRYVIEW(dev_priv) ||
				   IS_BROXTON(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Read out shared DPLL state and recompute each PLL's crtc usage. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}
	}
}

/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Switch off shared DPLLs that are on but unused by any crtc. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}

/* Restore the display state on resume: re-read the hardware state and then
 * commit the atomic state saved at suspend time (if any). */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	/* Ensure the hw-state readout runs only once across lock retries. */
	bool setup = false;

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a cludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}

/* GEM-side modeset init: power management, overlay, and pinning of any
 * framebuffers inherited from the BIOS. */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Drop the BIOS fb and carry on without it. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}

/* Tear down the sysfs/backlight presence of a connector. */
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

/* Driver-unload teardown; mirrors intel_modeset_init in reverse order. */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv =
	    dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	for_each_intel_connector(dev, connector)
		connector->unregister(connector);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	intel_cleanup_gt_powersave(dev);

	intel_teardown_gmbus(dev);
}

/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

/* Link an intel_connector to its encoder, both in driver state and in the
 * DRM core's possible-encoder bookkeeping. */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* The GMCH control register moved on gen6+. */
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ?
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Nothing to do if the hardware already matches the requested state. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if 0
/* Snapshot of display registers captured on GPU error for later dumping.
 * NOTE(review): this whole section is compiled out in this port. */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/* Capture the current display register state. May be called from atomic
 * context (GFP_ATOMIC allocation); returns NULL on display-less hardware
 * or allocation failure. Caller owns the returned memory. */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Skip pipes whose power domain is off: reading their
		 * registers would be invalid. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/* Pretty-print a previously captured display error state into the error
 * state buffer. Tolerates a NULL error pointer. */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif