1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Primary plane formats for skl: adds the ARGB/ABGR variants to the i965 set */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

/* Forward declarations for helpers defined later in this file. */
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_set_mode(struct drm_crtc *crtc,
			  struct drm_atomic_state *state,
			  bool force_restore);
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void
intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *crtc);
static void intel_finish_crtc_commit(struct drm_crtc *crtc);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors);
static void intel_crtc_enable_planes(struct drm_crtc *crtc);
static void intel_crtc_disable_planes(struct drm_crtc *crtc);

/*
 * MST connectors drive their stream through the per-pipe encoder of
 * their mst_port; all other connectors use their own encoder.
 */
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
	if (!connector->mst_port)
		return connector->encoder;
	else
		return &connector->mst_port->mst_encoders[pipe]->base;
}

/* Inclusive [min, max] range for a single PLL divider. */
typedef struct {
	int min, max;
} intel_range_t;

/* p2 divider selection: slow value below dot_limit, fast value above. */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/* Read the PCH raw clock frequency; valid only on PCH-split platforms. */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		/* +2: the register field encodes the feedback clock minus two */
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

/* PLL divider limits per platform/output type; consumed by intel_limit(). */
static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const intel_limit_t intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const intel_limit_t intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min =
1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const intel_limit_t intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const intel_limit_t intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	/* dot_limit 0: p2 is effectively fixed (slow == fast) */
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const intel_limit_t intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const intel_limit_t intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const intel_limit_t intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const intel_limit_t intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const intel_limit_t intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* m2 is stored in 22.10 fixed point (values shifted left by 22) */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

static const intel_limit_t intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

/* Derive the composite m/p dividers and the vco/dot clocks for VLV. */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	/* guard the divisions below against divide-by-zero */
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		/* only consider connectors staged onto this crtc */
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	/* a crtc being configured with no connectors is unexpected */
	WARN_ON(num_connectors == 0);

	return false;
}

/* Pick the ILK/SNB divider limit table based on output type and refclk. */
static const intel_limit_t *
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Pick the G4X divider limit table based on the staged output type. */
static const intel_limit_t *
intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Dispatch to the per-platform divider limit table. */
static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (IS_BROXTON(dev))
		limit = &intel_limits_bxt;
	else if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc_state, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc_state);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Composite m divider for i9xx: register fields encode actual value - 2. */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static void i9xx_clock(int refclk,
		       intel_clock_t *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	/* n is encoded as actual value - 2, hence the n + 2 below */
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Derive vco/dot for CHV; m2 carries 22 fractional bits, hence n << 22. */
static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
			clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/BXT limit tables have no composite m/p ranges to check */
	if (!IS_VALLEYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/*
 * Exhaustively search the i9xx divider space for the combination whose
 * resulting dot clock is closest to @target; returns true if anything
 * valid was found.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* hardware requires m1 > m2 on these platforms */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state
				       *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	/* like i9xx_find_best_dpll() but without the m1 > m2 restriction */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * G4X divider search: accepts any candidate within ~0.585% of @target,
 * preferring smaller n (and, per the loop order, larger m1/m2/p1).
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	found = false;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefere larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* lock n to the best found so far */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
834 */ 835 if (IS_CHERRYVIEW(dev)) { 836 *error_ppm = 0; 837 838 return calculated_clock->p > best_clock->p; 839 } 840 841 if (WARN_ON_ONCE(!target_freq)) 842 return false; 843 844 *error_ppm = div_u64(1000000ULL * 845 abs(target_freq - calculated_clock->dot), 846 target_freq); 847 /* 848 * Prefer a better P value over a better (smaller) error if the error 849 * is small. Ensure this preference for future configurations too by 850 * setting the error to 0. 851 */ 852 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { 853 *error_ppm = 0; 854 855 return true; 856 } 857 858 return *error_ppm + 10 < best_error_ppm; 859 } 860 861 static bool 862 vlv_find_best_dpll(const intel_limit_t *limit, 863 struct intel_crtc_state *crtc_state, 864 int target, int refclk, intel_clock_t *match_clock, 865 intel_clock_t *best_clock) 866 { 867 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 868 struct drm_device *dev = crtc->base.dev; 869 intel_clock_t clock; 870 unsigned int bestppm = 1000000; 871 /* min update 19.2 MHz */ 872 int max_n = min(limit->n.max, refclk / 19200); 873 bool found = false; 874 875 target *= 5; /* fast clock */ 876 877 memset(best_clock, 0, sizeof(*best_clock)); 878 879 /* based on hardware requirement, prefer smaller n to precision */ 880 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { 881 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 882 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; 883 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 884 clock.p = clock.p1 * clock.p2; 885 /* based on hardware requirement, prefer bigger m1,m2 values */ 886 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 887 unsigned int ppm; 888 889 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 890 refclk * clock.m1); 891 892 vlv_clock(refclk, &clock); 893 894 if (!intel_PLL_is_valid(dev, limit, 895 &clock)) 896 continue; 897 898 if (!vlv_PLL_is_optimal(dev, target, 899 &clock, 900 best_clock, 901 bestppm, &ppm)) 902 continue; 903 904 *best_clock = clock; 905 bestppm = ppm; 906 found = true; 907 } 908 } 909 } 910 } 911 912 return found; 913 } 914 915 static bool 916 chv_find_best_dpll(const intel_limit_t *limit, 917 struct intel_crtc_state *crtc_state, 918 int target, int refclk, intel_clock_t *match_clock, 919 intel_clock_t *best_clock) 920 { 921 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 922 struct drm_device *dev = crtc->base.dev; 923 unsigned int best_error_ppm; 924 intel_clock_t clock; 925 uint64_t m2; 926 int found = false; 927 928 memset(best_clock, 0, sizeof(*best_clock)); 929 best_error_ppm = 1000000; 930 931 /* 932 * Based on hardware doc, the n always set to 1, and m1 always 933 * set to 2. If requires to support 200Mhz refclk, we need to 934 * revisit this because n may not 1 anymore. 935 */ 936 clock.n = 1, clock.m1 = 2; 937 target *= 5; /* fast clock */ 938 939 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 940 for (clock.p2 = limit->p2.p2_fast; 941 clock.p2 >= limit->p2.p2_slow; 942 clock.p2 -= clock.p2 > 10 ? 
				2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			/* solve m2 in 22.10 fixed point for this p/n/m1 */
			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
					clock.n) << 22, refclk * clock.m1);

			/* reject values that would overflow clock.m2 (int) */
			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_clock(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}

/* BXT reuses the CHV search with the BXT limit table and default refclk. */
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			intel_clock_t *best_clock)
{
	int refclk = i9xx_get_refclk(crtc_state, 0);

	return chv_find_best_dpll(intel_limit(crtc_state, refclk), crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

/* Map a pipe to the CPU transcoder currently driving it. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

/*
 * Sample the display scanline counter twice, 5ms apart; an unchanged
 * value means the pipe has stopped scanning out.
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
1045 * 1046 */ 1047 static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1048 { 1049 struct drm_device *dev = crtc->base.dev; 1050 struct drm_i915_private *dev_priv = dev->dev_private; 1051 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1052 enum i915_pipe pipe = crtc->pipe; 1053 1054 if (INTEL_INFO(dev)->gen >= 4) { 1055 int reg = PIPECONF(cpu_transcoder); 1056 1057 /* Wait for the Pipe State to go off */ 1058 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 1059 100)) 1060 WARN(1, "pipe_off wait timed out\n"); 1061 } else { 1062 /* Wait for the display line to settle */ 1063 if (wait_for(pipe_dsl_stopped(dev, pipe), 100)) 1064 WARN(1, "pipe_off wait timed out\n"); 1065 } 1066 } 1067 1068 /* 1069 * ibx_digital_port_connected - is the specified port connected? 1070 * @dev_priv: i915 private structure 1071 * @port: the port to test 1072 * 1073 * Returns true if @port is connected, false otherwise. 1074 */ 1075 bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, 1076 struct intel_digital_port *port) 1077 { 1078 u32 bit; 1079 1080 if (HAS_PCH_IBX(dev_priv->dev)) { 1081 switch (port->port) { 1082 case PORT_B: 1083 bit = SDE_PORTB_HOTPLUG; 1084 break; 1085 case PORT_C: 1086 bit = SDE_PORTC_HOTPLUG; 1087 break; 1088 case PORT_D: 1089 bit = SDE_PORTD_HOTPLUG; 1090 break; 1091 default: 1092 return true; 1093 } 1094 } else { 1095 switch (port->port) { 1096 case PORT_B: 1097 bit = SDE_PORTB_HOTPLUG_CPT; 1098 break; 1099 case PORT_C: 1100 bit = SDE_PORTC_HOTPLUG_CPT; 1101 break; 1102 case PORT_D: 1103 bit = SDE_PORTD_HOTPLUG_CPT; 1104 break; 1105 default: 1106 return true; 1107 } 1108 } 1109 1110 return I915_READ(SDEISR) & bit; 1111 } 1112 1113 static const char *state_string(bool enabled) 1114 { 1115 return enabled ? 
"on" : "off"; 1116 } 1117 1118 /* Only for pre-ILK configs */ 1119 void assert_pll(struct drm_i915_private *dev_priv, 1120 enum i915_pipe pipe, bool state) 1121 { 1122 int reg; 1123 u32 val; 1124 bool cur_state; 1125 1126 reg = DPLL(pipe); 1127 val = I915_READ(reg); 1128 cur_state = !!(val & DPLL_VCO_ENABLE); 1129 I915_STATE_WARN(cur_state != state, 1130 "PLL state assertion failure (expected %s, current %s)\n", 1131 state_string(state), state_string(cur_state)); 1132 } 1133 1134 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1135 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1136 { 1137 u32 val; 1138 bool cur_state; 1139 1140 mutex_lock(&dev_priv->sb_lock); 1141 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1142 mutex_unlock(&dev_priv->sb_lock); 1143 1144 cur_state = val & DSI_PLL_VCO_EN; 1145 I915_STATE_WARN(cur_state != state, 1146 "DSI PLL state assertion failure (expected %s, current %s)\n", 1147 state_string(state), state_string(cur_state)); 1148 } 1149 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) 1150 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) 1151 1152 struct intel_shared_dpll * 1153 intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 1154 { 1155 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1156 1157 if (crtc->config->shared_dpll < 0) 1158 return NULL; 1159 1160 return &dev_priv->shared_dplls[crtc->config->shared_dpll]; 1161 } 1162 1163 /* For ILK+ */ 1164 void assert_shared_dpll(struct drm_i915_private *dev_priv, 1165 struct intel_shared_dpll *pll, 1166 bool state) 1167 { 1168 bool cur_state; 1169 struct intel_dpll_hw_state hw_state; 1170 1171 if (WARN (!pll, 1172 "asserting DPLL %s with no DPLL\n", state_string(state))) 1173 return; 1174 1175 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); 1176 I915_STATE_WARN(cur_state != state, 1177 "%s assertion failure (expected %s, current %s)\n", 1178 pll->name, state_string(state), state_string(cur_state)); 1179 
} 1180 1181 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1182 enum i915_pipe pipe, bool state) 1183 { 1184 int reg; 1185 u32 val; 1186 bool cur_state; 1187 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1188 pipe); 1189 1190 if (HAS_DDI(dev_priv->dev)) { 1191 /* DDI does not have a specific FDI_TX register */ 1192 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1193 val = I915_READ(reg); 1194 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1195 } else { 1196 reg = FDI_TX_CTL(pipe); 1197 val = I915_READ(reg); 1198 cur_state = !!(val & FDI_TX_ENABLE); 1199 } 1200 I915_STATE_WARN(cur_state != state, 1201 "FDI TX state assertion failure (expected %s, current %s)\n", 1202 state_string(state), state_string(cur_state)); 1203 } 1204 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1205 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1206 1207 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1208 enum i915_pipe pipe, bool state) 1209 { 1210 int reg; 1211 u32 val; 1212 bool cur_state; 1213 1214 reg = FDI_RX_CTL(pipe); 1215 val = I915_READ(reg); 1216 cur_state = !!(val & FDI_RX_ENABLE); 1217 I915_STATE_WARN(cur_state != state, 1218 "FDI RX state assertion failure (expected %s, current %s)\n", 1219 state_string(state), state_string(cur_state)); 1220 } 1221 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1222 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1223 1224 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1225 enum i915_pipe pipe) 1226 { 1227 int reg; 1228 u32 val; 1229 1230 /* ILK FDI PLL is always enabled */ 1231 if (INTEL_INFO(dev_priv->dev)->gen == 5) 1232 return; 1233 1234 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1235 if (HAS_DDI(dev_priv->dev)) 1236 return; 1237 1238 reg = FDI_TX_CTL(pipe); 1239 val = I915_READ(reg); 1240 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active 
but is disabled\n"); 1241 } 1242 1243 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1244 enum i915_pipe pipe, bool state) 1245 { 1246 int reg; 1247 u32 val; 1248 bool cur_state; 1249 1250 reg = FDI_RX_CTL(pipe); 1251 val = I915_READ(reg); 1252 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1253 I915_STATE_WARN(cur_state != state, 1254 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1255 state_string(state), state_string(cur_state)); 1256 } 1257 1258 void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1259 enum i915_pipe pipe) 1260 { 1261 struct drm_device *dev = dev_priv->dev; 1262 int pp_reg; 1263 u32 val; 1264 enum i915_pipe panel_pipe = PIPE_A; 1265 bool locked = true; 1266 1267 if (WARN_ON(HAS_DDI(dev))) 1268 return; 1269 1270 if (HAS_PCH_SPLIT(dev)) { 1271 u32 port_sel; 1272 1273 pp_reg = PCH_PP_CONTROL; 1274 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; 1275 1276 if (port_sel == PANEL_PORT_SELECT_LVDS && 1277 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1278 panel_pipe = PIPE_B; 1279 /* XXX: else fix for eDP */ 1280 } else if (IS_VALLEYVIEW(dev)) { 1281 /* presumably write lock depends on pipe, not port select */ 1282 pp_reg = VLV_PIPE_PP_CONTROL(pipe); 1283 panel_pipe = pipe; 1284 } else { 1285 pp_reg = PP_CONTROL; 1286 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1287 panel_pipe = PIPE_B; 1288 } 1289 1290 val = I915_READ(pp_reg); 1291 if (!(val & PANEL_POWER_ON) || 1292 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1293 locked = false; 1294 1295 I915_STATE_WARN(panel_pipe == pipe && locked, 1296 "panel assertion failure, pipe %c regs locked\n", 1297 pipe_name(pipe)); 1298 } 1299 1300 static void assert_cursor(struct drm_i915_private *dev_priv, 1301 enum i915_pipe pipe, bool state) 1302 { 1303 struct drm_device *dev = dev_priv->dev; 1304 bool cur_state; 1305 1306 if (IS_845G(dev) || IS_I865G(dev)) 1307 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1308 else 1309 cur_state = I915_READ(CURCNTR(pipe)) & 
CURSOR_MODE; 1310 1311 I915_STATE_WARN(cur_state != state, 1312 "cursor on pipe %c assertion failure (expected %s, current %s)\n", 1313 pipe_name(pipe), state_string(state), state_string(cur_state)); 1314 } 1315 #define assert_cursor_enabled(d, p) assert_cursor(d, p, true) 1316 #define assert_cursor_disabled(d, p) assert_cursor(d, p, false) 1317 1318 void assert_pipe(struct drm_i915_private *dev_priv, 1319 enum i915_pipe pipe, bool state) 1320 { 1321 int reg; 1322 u32 val; 1323 bool cur_state; 1324 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1325 pipe); 1326 1327 /* if we need the pipe quirk it must be always on */ 1328 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1329 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1330 state = true; 1331 1332 if (!intel_display_power_is_enabled(dev_priv, 1333 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1334 cur_state = false; 1335 } else { 1336 reg = PIPECONF(cpu_transcoder); 1337 val = I915_READ(reg); 1338 cur_state = !!(val & PIPECONF_ENABLE); 1339 } 1340 1341 I915_STATE_WARN(cur_state != state, 1342 "pipe %c assertion failure (expected %s, current %s)\n", 1343 pipe_name(pipe), state_string(state), state_string(cur_state)); 1344 } 1345 1346 static void assert_plane(struct drm_i915_private *dev_priv, 1347 enum plane plane, bool state) 1348 { 1349 int reg; 1350 u32 val; 1351 bool cur_state; 1352 1353 reg = DSPCNTR(plane); 1354 val = I915_READ(reg); 1355 cur_state = !!(val & DISPLAY_PLANE_ENABLE); 1356 I915_STATE_WARN(cur_state != state, 1357 "plane %c assertion failure (expected %s, current %s)\n", 1358 plane_name(plane), state_string(state), state_string(cur_state)); 1359 } 1360 1361 #define assert_plane_enabled(d, p) assert_plane(d, p, true) 1362 #define assert_plane_disabled(d, p) assert_plane(d, p, false) 1363 1364 static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1365 enum i915_pipe pipe) 1366 { 1367 struct drm_device *dev = dev_priv->dev; 1368 int 
reg, i; 1369 u32 val; 1370 int cur_pipe; 1371 1372 /* Primary planes are fixed to pipes on gen4+ */ 1373 if (INTEL_INFO(dev)->gen >= 4) { 1374 reg = DSPCNTR(pipe); 1375 val = I915_READ(reg); 1376 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE, 1377 "plane %c assertion failure, should be disabled but not\n", 1378 plane_name(pipe)); 1379 return; 1380 } 1381 1382 /* Need to check both planes against the pipe */ 1383 for_each_pipe(dev_priv, i) { 1384 reg = DSPCNTR(i); 1385 val = I915_READ(reg); 1386 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> 1387 DISPPLANE_SEL_PIPE_SHIFT; 1388 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, 1389 "plane %c assertion failure, should be off on pipe %c but is still active\n", 1390 plane_name(i), pipe_name(pipe)); 1391 } 1392 } 1393 1394 static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1395 enum i915_pipe pipe) 1396 { 1397 struct drm_device *dev = dev_priv->dev; 1398 int reg, sprite; 1399 u32 val; 1400 1401 if (INTEL_INFO(dev)->gen >= 9) { 1402 for_each_sprite(dev_priv, pipe, sprite) { 1403 val = I915_READ(PLANE_CTL(pipe, sprite)); 1404 I915_STATE_WARN(val & PLANE_CTL_ENABLE, 1405 "plane %d assertion failure, should be off on pipe %c but is still active\n", 1406 sprite, pipe_name(pipe)); 1407 } 1408 } else if (IS_VALLEYVIEW(dev)) { 1409 for_each_sprite(dev_priv, pipe, sprite) { 1410 reg = SPCNTR(pipe, sprite); 1411 val = I915_READ(reg); 1412 I915_STATE_WARN(val & SP_ENABLE, 1413 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1414 sprite_name(pipe, sprite), pipe_name(pipe)); 1415 } 1416 } else if (INTEL_INFO(dev)->gen >= 7) { 1417 reg = SPRCTL(pipe); 1418 val = I915_READ(reg); 1419 I915_STATE_WARN(val & SPRITE_ENABLE, 1420 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1421 plane_name(pipe), pipe_name(pipe)); 1422 } else if (INTEL_INFO(dev)->gen >= 5) { 1423 reg = DVSCNTR(pipe); 1424 val = I915_READ(reg); 1425 I915_STATE_WARN(val & 
DVS_ENABLE, 1426 "sprite %c assertion failure, should be off on pipe %c but is still active\n", 1427 plane_name(pipe), pipe_name(pipe)); 1428 } 1429 } 1430 1431 static void assert_vblank_disabled(struct drm_crtc *crtc) 1432 { 1433 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0)) 1434 drm_crtc_vblank_put(crtc); 1435 } 1436 1437 static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) 1438 { 1439 u32 val; 1440 bool enabled; 1441 1442 I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev))); 1443 1444 val = I915_READ(PCH_DREF_CONTROL); 1445 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | 1446 DREF_SUPERSPREAD_SOURCE_MASK)); 1447 I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); 1448 } 1449 1450 static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, 1451 enum i915_pipe pipe) 1452 { 1453 int reg; 1454 u32 val; 1455 bool enabled; 1456 1457 reg = PCH_TRANSCONF(pipe); 1458 val = I915_READ(reg); 1459 enabled = !!(val & TRANS_ENABLE); 1460 I915_STATE_WARN(enabled, 1461 "transcoder assertion failed, should be off on pipe %c but is still active\n", 1462 pipe_name(pipe)); 1463 } 1464 1465 static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, 1466 enum i915_pipe pipe, u32 port_sel, u32 val) 1467 { 1468 if ((val & DP_PORT_EN) == 0) 1469 return false; 1470 1471 if (HAS_PCH_CPT(dev_priv->dev)) { 1472 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); 1473 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); 1474 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) 1475 return false; 1476 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1477 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe)) 1478 return false; 1479 } else { 1480 if ((val & DP_PIPE_MASK) != (pipe << 30)) 1481 return false; 1482 } 1483 return true; 1484 } 1485 1486 static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, 1487 enum i915_pipe pipe, u32 val) 1488 { 1489 if 
((val & SDVO_ENABLE) == 0) 1490 return false; 1491 1492 if (HAS_PCH_CPT(dev_priv->dev)) { 1493 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe)) 1494 return false; 1495 } else if (IS_CHERRYVIEW(dev_priv->dev)) { 1496 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe)) 1497 return false; 1498 } else { 1499 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe)) 1500 return false; 1501 } 1502 return true; 1503 } 1504 1505 static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, 1506 enum i915_pipe pipe, u32 val) 1507 { 1508 if ((val & LVDS_PORT_EN) == 0) 1509 return false; 1510 1511 if (HAS_PCH_CPT(dev_priv->dev)) { 1512 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1513 return false; 1514 } else { 1515 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) 1516 return false; 1517 } 1518 return true; 1519 } 1520 1521 static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, 1522 enum i915_pipe pipe, u32 val) 1523 { 1524 if ((val & ADPA_DAC_ENABLE) == 0) 1525 return false; 1526 if (HAS_PCH_CPT(dev_priv->dev)) { 1527 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) 1528 return false; 1529 } else { 1530 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) 1531 return false; 1532 } 1533 return true; 1534 } 1535 1536 static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, 1537 enum i915_pipe pipe, int reg, u32 port_sel) 1538 { 1539 u32 val = I915_READ(reg); 1540 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), 1541 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", 1542 reg, pipe_name(pipe)); 1543 1544 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 1545 && (val & DP_PIPEB_SELECT), 1546 "IBX PCH dp port still using transcoder B\n"); 1547 } 1548 1549 static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, 1550 enum i915_pipe pipe, int reg) 1551 { 1552 u32 val = I915_READ(reg); 1553 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, 
pipe, val), 1554 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", 1555 reg, pipe_name(pipe)); 1556 1557 I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0 1558 && (val & SDVO_PIPE_B_SELECT), 1559 "IBX PCH hdmi port still using transcoder B\n"); 1560 } 1561 1562 static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, 1563 enum i915_pipe pipe) 1564 { 1565 int reg; 1566 u32 val; 1567 1568 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); 1569 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); 1570 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); 1571 1572 reg = PCH_ADPA; 1573 val = I915_READ(reg); 1574 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val), 1575 "PCH VGA enabled on transcoder %c, should be disabled\n", 1576 pipe_name(pipe)); 1577 1578 reg = PCH_LVDS; 1579 val = I915_READ(reg); 1580 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val), 1581 "PCH LVDS enabled on transcoder %c, should be disabled\n", 1582 pipe_name(pipe)); 1583 1584 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB); 1585 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC); 1586 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID); 1587 } 1588 1589 static void intel_init_dpio(struct drm_device *dev) 1590 { 1591 struct drm_i915_private *dev_priv = dev->dev_private; 1592 1593 if (!IS_VALLEYVIEW(dev)) 1594 return; 1595 1596 /* 1597 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C), 1598 * CHV x1 PHY (DP/HDMI D) 1599 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C) 1600 */ 1601 if (IS_CHERRYVIEW(dev)) { 1602 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2; 1603 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO; 1604 } else { 1605 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO; 1606 } 1607 } 1608 1609 static void vlv_enable_pll(struct intel_crtc *crtc, 1610 const struct intel_crtc_state *pipe_config) 1611 { 1612 struct drm_device *dev = crtc->base.dev; 1613 
struct drm_i915_private *dev_priv = dev->dev_private; 1614 int reg = DPLL(crtc->pipe); 1615 u32 dpll = pipe_config->dpll_hw_state.dpll; 1616 1617 assert_pipe_disabled(dev_priv, crtc->pipe); 1618 1619 /* No really, not for ILK+ */ 1620 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev)); 1621 1622 /* PLL is protected by panel, make sure we can write it */ 1623 if (IS_MOBILE(dev_priv->dev)) 1624 assert_panel_unlocked(dev_priv, crtc->pipe); 1625 1626 I915_WRITE(reg, dpll); 1627 POSTING_READ(reg); 1628 udelay(150); 1629 1630 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1631 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1632 1633 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md); 1634 POSTING_READ(DPLL_MD(crtc->pipe)); 1635 1636 /* We do this three times for luck */ 1637 I915_WRITE(reg, dpll); 1638 POSTING_READ(reg); 1639 udelay(150); /* wait for warmup */ 1640 I915_WRITE(reg, dpll); 1641 POSTING_READ(reg); 1642 udelay(150); /* wait for warmup */ 1643 I915_WRITE(reg, dpll); 1644 POSTING_READ(reg); 1645 udelay(150); /* wait for warmup */ 1646 } 1647 1648 static void chv_enable_pll(struct intel_crtc *crtc, 1649 const struct intel_crtc_state *pipe_config) 1650 { 1651 struct drm_device *dev = crtc->base.dev; 1652 struct drm_i915_private *dev_priv = dev->dev_private; 1653 int pipe = crtc->pipe; 1654 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1655 u32 tmp; 1656 1657 assert_pipe_disabled(dev_priv, crtc->pipe); 1658 1659 BUG_ON(!IS_CHERRYVIEW(dev_priv->dev)); 1660 1661 mutex_lock(&dev_priv->sb_lock); 1662 1663 /* Enable back the 10bit clock to display controller */ 1664 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1665 tmp |= DPIO_DCLKP_EN; 1666 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp); 1667 1668 mutex_unlock(&dev_priv->sb_lock); 1669 1670 /* 1671 * Need to wait > 100ns between dclkp clock enable bit and PLL enable. 
1672 */ 1673 udelay(1); 1674 1675 /* Enable PLL */ 1676 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1677 1678 /* Check PLL is locked */ 1679 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1680 DRM_ERROR("PLL %d failed to lock\n", pipe); 1681 1682 /* not sure when this should be written */ 1683 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md); 1684 POSTING_READ(DPLL_MD(pipe)); 1685 } 1686 1687 static int intel_num_dvo_pipes(struct drm_device *dev) 1688 { 1689 struct intel_crtc *crtc; 1690 int count = 0; 1691 1692 for_each_intel_crtc(dev, crtc) 1693 count += crtc->active && 1694 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1695 1696 return count; 1697 } 1698 1699 static void i9xx_enable_pll(struct intel_crtc *crtc) 1700 { 1701 struct drm_device *dev = crtc->base.dev; 1702 struct drm_i915_private *dev_priv = dev->dev_private; 1703 int reg = DPLL(crtc->pipe); 1704 u32 dpll = crtc->config->dpll_hw_state.dpll; 1705 1706 assert_pipe_disabled(dev_priv, crtc->pipe); 1707 1708 /* No really, not for ILK+ */ 1709 BUG_ON(INTEL_INFO(dev)->gen >= 5); 1710 1711 /* PLL is protected by panel, make sure we can write it */ 1712 if (IS_MOBILE(dev) && !IS_I830(dev)) 1713 assert_panel_unlocked(dev_priv, crtc->pipe); 1714 1715 /* Enable DVO 2x clock on both PLLs if necessary */ 1716 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) { 1717 /* 1718 * It appears to be important that we don't enable this 1719 * for the current pipe before otherwise configuring the 1720 * PLL. No idea how this should be handled if multiple 1721 * DVO outputs are enabled simultaneosly. 1722 */ 1723 dpll |= DPLL_DVO_2X_MODE; 1724 I915_WRITE(DPLL(!crtc->pipe), 1725 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE); 1726 } 1727 1728 /* Wait for the clocks to stabilize. 
*/ 1729 POSTING_READ(reg); 1730 udelay(150); 1731 1732 if (INTEL_INFO(dev)->gen >= 4) { 1733 I915_WRITE(DPLL_MD(crtc->pipe), 1734 crtc->config->dpll_hw_state.dpll_md); 1735 } else { 1736 /* The pixel multiplier can only be updated once the 1737 * DPLL is enabled and the clocks are stable. 1738 * 1739 * So write it again. 1740 */ 1741 I915_WRITE(reg, dpll); 1742 } 1743 1744 /* We do this three times for luck */ 1745 I915_WRITE(reg, dpll); 1746 POSTING_READ(reg); 1747 udelay(150); /* wait for warmup */ 1748 I915_WRITE(reg, dpll); 1749 POSTING_READ(reg); 1750 udelay(150); /* wait for warmup */ 1751 I915_WRITE(reg, dpll); 1752 POSTING_READ(reg); 1753 udelay(150); /* wait for warmup */ 1754 } 1755 1756 /** 1757 * i9xx_disable_pll - disable a PLL 1758 * @dev_priv: i915 private structure 1759 * @pipe: pipe PLL to disable 1760 * 1761 * Disable the PLL for @pipe, making sure the pipe is off first. 1762 * 1763 * Note! This is for pre-ILK only. 1764 */ 1765 static void i9xx_disable_pll(struct intel_crtc *crtc) 1766 { 1767 struct drm_device *dev = crtc->base.dev; 1768 struct drm_i915_private *dev_priv = dev->dev_private; 1769 enum i915_pipe pipe = crtc->pipe; 1770 1771 /* Disable DVO 2x clock on both PLLs if necessary */ 1772 if (IS_I830(dev) && 1773 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1774 intel_num_dvo_pipes(dev) == 1) { 1775 I915_WRITE(DPLL(PIPE_B), 1776 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1777 I915_WRITE(DPLL(PIPE_A), 1778 I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE); 1779 } 1780 1781 /* Don't disable pipe or pipe PLLs if needed */ 1782 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 1783 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1784 return; 1785 1786 /* Make sure the pipe isn't still relying on us */ 1787 assert_pipe_disabled(dev_priv, pipe); 1788 1789 I915_WRITE(DPLL(pipe), 0); 1790 POSTING_READ(DPLL(pipe)); 1791 } 1792 1793 static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1794 { 
1795 u32 val = 0; 1796 1797 /* Make sure the pipe isn't still relying on us */ 1798 assert_pipe_disabled(dev_priv, pipe); 1799 1800 /* 1801 * Leave integrated clock source and reference clock enabled for pipe B. 1802 * The latter is needed for VGA hotplug / manual detection. 1803 */ 1804 if (pipe == PIPE_B) 1805 val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV; 1806 I915_WRITE(DPLL(pipe), val); 1807 POSTING_READ(DPLL(pipe)); 1808 1809 } 1810 1811 static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe) 1812 { 1813 enum dpio_channel port = vlv_pipe_to_channel(pipe); 1814 u32 val; 1815 1816 /* Make sure the pipe isn't still relying on us */ 1817 assert_pipe_disabled(dev_priv, pipe); 1818 1819 /* Set PLL en = 0 */ 1820 val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV; 1821 if (pipe != PIPE_A) 1822 val |= DPLL_INTEGRATED_CRI_CLK_VLV; 1823 I915_WRITE(DPLL(pipe), val); 1824 POSTING_READ(DPLL(pipe)); 1825 1826 mutex_lock(&dev_priv->sb_lock); 1827 1828 /* Disable 10bit clock to display controller */ 1829 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)); 1830 val &= ~DPIO_DCLKP_EN; 1831 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val); 1832 1833 /* disable left/right clock distribution */ 1834 if (pipe != PIPE_B) { 1835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0); 1836 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK); 1837 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val); 1838 } else { 1839 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1); 1840 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK); 1841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val); 1842 } 1843 1844 mutex_unlock(&dev_priv->sb_lock); 1845 } 1846 1847 void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1848 struct intel_digital_port *dport, 1849 unsigned int expected_mask) 1850 { 1851 u32 port_mask; 1852 int dpll_reg; 1853 1854 switch (dport->port) { 1855 case PORT_B: 1856 port_mask = DPLL_PORTB_READY_MASK; 
1857 dpll_reg = DPLL(0); 1858 break; 1859 case PORT_C: 1860 port_mask = DPLL_PORTC_READY_MASK; 1861 dpll_reg = DPLL(0); 1862 expected_mask <<= 4; 1863 break; 1864 case PORT_D: 1865 port_mask = DPLL_PORTD_READY_MASK; 1866 dpll_reg = DPIO_PHY_STATUS; 1867 break; 1868 default: 1869 BUG(); 1870 } 1871 1872 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) 1873 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1874 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1875 } 1876 1877 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1878 { 1879 struct drm_device *dev = crtc->base.dev; 1880 struct drm_i915_private *dev_priv = dev->dev_private; 1881 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1882 1883 if (WARN_ON(pll == NULL)) 1884 return; 1885 1886 WARN_ON(!pll->config.crtc_mask); 1887 if (pll->active == 0) { 1888 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1889 WARN_ON(pll->on); 1890 assert_shared_dpll_disabled(dev_priv, pll); 1891 1892 pll->mode_set(dev_priv, pll); 1893 } 1894 } 1895 1896 /** 1897 * intel_enable_shared_dpll - enable PCH PLL 1898 * @dev_priv: i915 private structure 1899 * @pipe: pipe PLL to enable 1900 * 1901 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1902 * drives the transcoder clock. 1903 */ 1904 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1905 { 1906 struct drm_device *dev = crtc->base.dev; 1907 struct drm_i915_private *dev_priv = dev->dev_private; 1908 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1909 1910 if (WARN_ON(pll == NULL)) 1911 return; 1912 1913 if (WARN_ON(pll->config.crtc_mask == 0)) 1914 return; 1915 1916 DRM_DEBUG_KMS("enable %s (active %d, on? 
%d) for crtc %d\n", 1917 pll->name, pll->active, pll->on, 1918 crtc->base.base.id); 1919 1920 if (pll->active++) { 1921 WARN_ON(!pll->on); 1922 assert_shared_dpll_enabled(dev_priv, pll); 1923 return; 1924 } 1925 WARN_ON(pll->on); 1926 1927 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1928 1929 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1930 pll->enable(dev_priv, pll); 1931 pll->on = true; 1932 } 1933 1934 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1935 { 1936 struct drm_device *dev = crtc->base.dev; 1937 struct drm_i915_private *dev_priv = dev->dev_private; 1938 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1939 1940 /* PCH only available on ILK+ */ 1941 BUG_ON(INTEL_INFO(dev)->gen < 5); 1942 if (WARN_ON(pll == NULL)) 1943 return; 1944 1945 if (WARN_ON(pll->config.crtc_mask == 0)) 1946 return; 1947 1948 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1949 pll->name, pll->active, pll->on, 1950 crtc->base.base.id); 1951 1952 if (WARN_ON(pll->active == 0)) { 1953 assert_shared_dpll_disabled(dev_priv, pll); 1954 return; 1955 } 1956 1957 assert_shared_dpll_enabled(dev_priv, pll); 1958 WARN_ON(!pll->on); 1959 if (--pll->active) 1960 return; 1961 1962 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1963 pll->disable(dev_priv, pll); 1964 pll->on = false; 1965 1966 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1967 } 1968 1969 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1970 enum i915_pipe pipe) 1971 { 1972 struct drm_device *dev = dev_priv->dev; 1973 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1974 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1975 uint32_t reg, val, pipeconf_val; 1976 1977 /* PCH only available on ILK+ */ 1978 BUG_ON(!HAS_PCH_SPLIT(dev)); 1979 1980 /* Make sure PCH DPLL is enabled */ 1981 assert_shared_dpll_enabled(dev_priv, 1982 intel_crtc_to_shared_dpll(intel_crtc)); 1983 1984 /* FDI must be feeding us bits for PCH ports */ 1985 
assert_fdi_tx_enabled(dev_priv, pipe); 1986 assert_fdi_rx_enabled(dev_priv, pipe); 1987 1988 if (HAS_PCH_CPT(dev)) { 1989 /* Workaround: Set the timing override bit before enabling the 1990 * pch transcoder. */ 1991 reg = TRANS_CHICKEN2(pipe); 1992 val = I915_READ(reg); 1993 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1994 I915_WRITE(reg, val); 1995 } 1996 1997 reg = PCH_TRANSCONF(pipe); 1998 val = I915_READ(reg); 1999 pipeconf_val = I915_READ(PIPECONF(pipe)); 2000 2001 if (HAS_PCH_IBX(dev_priv->dev)) { 2002 /* 2003 * make the BPC in transcoder be consistent with 2004 * that in pipeconf reg. 2005 */ 2006 val &= ~PIPECONF_BPC_MASK; 2007 val |= pipeconf_val & PIPECONF_BPC_MASK; 2008 } 2009 2010 val &= ~TRANS_INTERLACE_MASK; 2011 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 2012 if (HAS_PCH_IBX(dev_priv->dev) && 2013 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 2014 val |= TRANS_LEGACY_INTERLACED_ILK; 2015 else 2016 val |= TRANS_INTERLACED; 2017 else 2018 val |= TRANS_PROGRESSIVE; 2019 2020 I915_WRITE(reg, val | TRANS_ENABLE); 2021 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2022 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 2023 } 2024 2025 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 2026 enum transcoder cpu_transcoder) 2027 { 2028 u32 val, pipeconf_val; 2029 2030 /* PCH only available on ILK+ */ 2031 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); 2032 2033 /* FDI must be feeding us bits for PCH ports */ 2034 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 2035 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2036 2037 /* Workaround: set timing override bit. 
*/ 2038 val = I915_READ(_TRANSA_CHICKEN2); 2039 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2040 I915_WRITE(_TRANSA_CHICKEN2, val); 2041 2042 val = TRANS_ENABLE; 2043 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 2044 2045 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 2046 PIPECONF_INTERLACED_ILK) 2047 val |= TRANS_INTERLACED; 2048 else 2049 val |= TRANS_PROGRESSIVE; 2050 2051 I915_WRITE(LPT_TRANSCONF, val); 2052 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 2053 DRM_ERROR("Failed to enable PCH transcoder\n"); 2054 } 2055 2056 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 2057 enum i915_pipe pipe) 2058 { 2059 struct drm_device *dev = dev_priv->dev; 2060 uint32_t reg, val; 2061 2062 /* FDI relies on the transcoder */ 2063 assert_fdi_tx_disabled(dev_priv, pipe); 2064 assert_fdi_rx_disabled(dev_priv, pipe); 2065 2066 /* Ports must be off as well */ 2067 assert_pch_ports_disabled(dev_priv, pipe); 2068 2069 reg = PCH_TRANSCONF(pipe); 2070 val = I915_READ(reg); 2071 val &= ~TRANS_ENABLE; 2072 I915_WRITE(reg, val); 2073 /* wait for PCH transcoder off, transcoder state */ 2074 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 2075 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 2076 2077 if (!HAS_PCH_IBX(dev)) { 2078 /* Workaround: Clear the timing override chicken bit again. */ 2079 reg = TRANS_CHICKEN2(pipe); 2080 val = I915_READ(reg); 2081 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2082 I915_WRITE(reg, val); 2083 } 2084 } 2085 2086 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 2087 { 2088 u32 val; 2089 2090 val = I915_READ(LPT_TRANSCONF); 2091 val &= ~TRANS_ENABLE; 2092 I915_WRITE(LPT_TRANSCONF, val); 2093 /* wait for PCH transcoder off, transcoder state */ 2094 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 2095 DRM_ERROR("Failed to disable PCH transcoder\n"); 2096 2097 /* Workaround: clear timing override bit. 
	 */
	val = I915_READ(_TRANSA_CHICKEN2);
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(_TRANSA_CHICKEN2, val);
}

/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum i915_pipe pch_transcoder;
	int reg;
	u32 val;

	/* Planes/cursor/sprites must come up after the pipe itself. */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder, always on transcoder A. */
	if (HAS_PCH_LPT(dev_priv->dev))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	/* NOTE: braceless nested if/else — the "else {" pairs with the outer
	 * HAS_GMCH_DISPLAY() check (dangling-else rule). */
	if (HAS_GMCH_DISPLAY(dev_priv->dev))
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legitimate under the force-pipe quirks. */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

/**
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
 * @plane: plane to be enabled
 * @crtc: crtc for the plane
 *
 * Enable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);
	to_intel_plane_state(plane->state)->visible = true;

	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);
}

/* True when the IOMMU has graphics mapped on gen6+; callers use this to
 * apply the VT-d scanout alignment workaround. */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/* Return the tile height (in rows) for the given tiling modifier and,
 * for Yf tiling, the pixel format's bytes-per-pixel. Linear is 1. */
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
		  uint64_t fb_format_modifier)
{
	unsigned int tile_height;
	uint32_t pixel_bytes;

	switch (fb_format_modifier) {
	case DRM_FORMAT_MOD_NONE:
		tile_height = 1;
		break;
	case I915_FORMAT_MOD_X_TILED:
		tile_height = IS_GEN2(dev) ? 16 : 8;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		tile_height = 32;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile height depends on the pixel size. */
		pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
		switch (pixel_bytes) {
		default:
		case 1:
			tile_height = 64;
			break;
		case 2:
		case 4:
			tile_height = 32;
			break;
		case 8:
			tile_height = 16;
			break;
		case 16:
			WARN_ONCE(1,
				  "128-bit pixels are not supported for display!");
			tile_height = 16;
			break;
		}
		break;
	default:
		MISSING_CASE(fb_format_modifier);
		tile_height = 1;
		break;
	}

	return tile_height;
}

/* Round @height up to a whole number of tile rows for the given modifier. */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_format_modifier)
{
	return ALIGN(height, intel_tile_height(dev, pixel_format,
					       fb_format_modifier));
}

/* Pick the GGTT view for @fb: rotated (with fb geometry recorded in
 * view->rotation_info) when the plane is rotated 90/270, normal otherwise.
 * Always returns 0. */
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->rotation_info;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return 0;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return 0;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->fb_modifier = fb->modifier[0];

	return 0;
}

/* Pin @fb's backing object into the GGTT for scanout (with the alignment
 * required by its tiling modifier) and install a fence register.
 * Returns 0 on success or a negative error code. Caller must hold
 * dev->struct_mutex. */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state,
			   struct intel_engine_cs *pipelined)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Scanout alignment requirement depends on the tiling modifier. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/* Y/Yf tiling is only valid for display on gen9+. */
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			      "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	if (ret)
		return ret;

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
						   &view);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out.
	 * Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression.  For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/* Undo intel_pin_and_fence_fb_obj(): release the fence and unpin the
 * object from its display GGTT view. Caller must hold struct_mutex. */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	int ret;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	WARN_ONCE(ret, "Couldn't get view from plane state!");

	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
 */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* X-tile geometry: 8 rows x 512 bytes per tile (4096 bytes). */
		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		/* Linear: round down to a 4k page, fold the remainder into x. */
		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}

/* Translate a DSPCNTR pixel-format field value to a DRM fourcc code.
 * Unknown values fall through to XRGB8888. */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/* Translate a SKL PLANE_CTL format field (plus RGB-order and alpha bits)
 * to a DRM fourcc code. Unknown formats fall through to the 8888 family. */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/* Wrap the firmware-programmed (BIOS) scanout buffer described by
 * @plane_config in a GEM stolen-memory object and an intel_framebuffer.
 * Returns true on success. */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0
	};
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

/* Take over the BIOS framebuffer for @intel_crtc's primary plane, either
 * by wrapping it directly (intel_alloc_initial_plane_obj) or by sharing
 * another CRTC's fb that scans out from the same GGTT address. */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	return;

valid_fb:
	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	primary->fb = fb;
	primary->state->crtc = &intel_crtc->base;
	primary->crtc = &intel_crtc->base;
	update_state_fb(primary);
	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}

/* Program the gen2-4 (pre-ILK) primary plane registers for @fb at (x,y),
 * or disable the plane when it is invisible or has no fb. */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size
	    = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a page-aligned base + tile offset. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/* Program the ILK-BDW primary plane registers for @fb at (x,y), or
 * disable the plane when it is invisible or has no fb. */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	bool visible = to_intel_plane_state(primary->state)->visible;
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!visible || !fb) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the offset in hardware (DSPOFFSET below). */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Return the unit (in bytes) in which PLANE_STRIDE must be expressed for
 * the given tiling modifier; see comment in body. */
u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
			      uint32_t
			      pixel_format)
{
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;

	/*
	 * The stride is either expressed as a multiple of 64 bytes
	 * chunks for linear buffers or in number of tiles for tiled
	 * buffers.
	 */
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return 64;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen == 2)
			return 128;
		return 512;
	case I915_FORMAT_MOD_Y_TILED:
		/* No need to check for old gens and Y tiling since this is
		 * about the display engine and those will be blocked before
		 * we get here.
		 */
		return 128;
	case I915_FORMAT_MOD_Yf_TILED:
		if (bits_per_pixel == 8)
			return 64;
		else
			return 128;
	default:
		MISSING_CASE(fb_modifier);
		return 64;
	}
}

/* GGTT offset of @obj in the view matching the plane's rotation state
 * (rotated view for 90/270, normal view otherwise). */
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
				     struct drm_i915_gem_object *obj)
{
	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;

	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
		view = &i915_ggtt_view_rotated;

	return i915_gem_obj_ggtt_offset_view(obj, view);
}

/*
 * This function detaches (i.e. unbinds) any scalers of @intel_crtc that
 * are not marked in-use in its scaler state, clearing their control,
 * window position and window size registers.
 */
void skl_detach_scalers(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev;
	struct drm_i915_private *dev_priv;
	struct intel_crtc_scaler_state *scaler_state;
	int i;

	if (!intel_crtc || !intel_crtc->config)
		return;

	dev = intel_crtc->base.dev;
	dev_priv = dev->dev_private;
	scaler_state = &intel_crtc->config->scaler_state;

	/* loop through and disable scalers that aren't in use */
	for (i = 0; i < intel_crtc->num_scalers; i++) {
		if (!scaler_state->scalers[i].in_use) {
			I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, i), 0);
			I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, i), 0);
			I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, i), 0);
			DRM_DEBUG_KMS("CRTC:%d Disabled scaler id %u.%u\n",
				intel_crtc->base.base.id, intel_crtc->pipe, i);
		}
	}
}

/* Translate a DRM fourcc code to SKL PLANE_CTL format/order/alpha bits.
 * Returns 0 (and logs via MISSING_CASE) for unsupported formats. */
u32 skl_plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_C8:
		return PLANE_CTL_FORMAT_INDEXED;
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	/*
	 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
	 * to be already pre-multiplied. We need to add a knob (or a different
	 * DRM_FORMAT) for user-space to configure that.
	 */
	case DRM_FORMAT_ABGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_ARGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888 |
			PLANE_CTL_ALPHA_SW_PREMULTIPLY;
	case DRM_FORMAT_XRGB2101010:
		return PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_XBGR2101010:
		return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	case DRM_FORMAT_YVYU:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
	case DRM_FORMAT_UYVY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
	case DRM_FORMAT_VYUY:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
	default:
		MISSING_CASE(pixel_format);
	}

	return 0;
}

/* Translate a framebuffer tiling modifier to SKL PLANE_CTL tiling bits.
 * Linear (and unknown) modifiers return 0. */
u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		return PLANE_CTL_TILED_X;
	case I915_FORMAT_MOD_Y_TILED:
		return PLANE_CTL_TILED_Y;
	case I915_FORMAT_MOD_Yf_TILED:
		return PLANE_CTL_TILED_YF;
	default:
		MISSING_CASE(fb_modifier);
	}

	return 0;
}

/* Translate a DRM rotation bitmask to SKL PLANE_CTL rotation bits. */
u32 skl_plane_ctl_rotation(unsigned int rotation)
{
	switch (rotation) {
	case BIT(DRM_ROTATE_0):
		break;
	/*
	 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
	 * while i915 HW rotation is clockwise, that's why this swapping.
	 */
	case BIT(DRM_ROTATE_90):
		return PLANE_CTL_ROTATE_270;
	case BIT(DRM_ROTATE_180):
		return PLANE_CTL_ROTATE_180;
	case BIT(DRM_ROTATE_270):
		return PLANE_CTL_ROTATE_90;
	default:
		MISSING_CASE(rotation);
	}

	return 0;
}

/* Program the SKL+ universal primary plane (and its pipe scaler, when a
 * scaler is assigned) for @fb at (x,y), or disable the plane when it is
 * invisible or has no fb. */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = crtc->primary;
	bool visible = to_intel_plane_state(plane->state)->visible;
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation;
	int x_offset, y_offset;
	unsigned long surf_addr;
	struct intel_crtc_state *crtc_state = intel_crtc->config;
	struct intel_plane_state *plane_state;
	int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
	int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
	int scaler_id = -1;	/* -1 means no scaler attached */

	plane_state = to_intel_plane_state(plane->state);

	if (!visible || !fb) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;

	rotation = plane->state->rotation;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj);

	/*
	 * FIXME:
	 * intel_plane_state->src, dst aren't set when transitional
	 * update_plane helpers are called from legacy paths.
	 * Once full atomic crtc is available, below check can be avoided.
	 */
	if (drm_rect_width(&plane_state->src)) {
		scaler_id = plane_state->scaler_id;
		/* src coordinates are 16.16 fixed point; dst are integer */
		src_x = plane_state->src.x1 >> 16;
		src_y = plane_state->src.y1 >> 16;
		src_w = drm_rect_width(&plane_state->src) >> 16;
		src_h = drm_rect_height(&plane_state->src) >> 16;
		dst_x = plane_state->dst.x1;
		dst_y = plane_state->dst.y1;
		dst_w = drm_rect_width(&plane_state->dst);
		dst_h = drm_rect_height(&plane_state->dst);

		WARN_ON(x != src_x || y != src_y);
	} else {
		src_w = intel_crtc->config->pipe_src_w;
		src_h = intel_crtc->config->pipe_src_h;
	}

	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev, fb->pixel_format,
						fb->modifier[0]);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		x_offset = stride * tile_height - y - src_h;
		y_offset = x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = x;
		y_offset = y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) |
			   dst_h);
		/* With a scaler, the plane position is handled by the scaler
		 * window, so the plane itself sits at (0,0). */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

/* Complete all outstanding page flips on every CRTC so user space gets
 * its flip events (used on GPU reset, see intel_finish_reset()). */
static void intel_complete_page_flips(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}
}

/* Re-program the primary plane of every active CRTC from its current fb,
 * e.g. to restore scanout addresses after a GPU reset. */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
	 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

/*
 * Force a full disable/re-enable cycle of an active crtc, planes included.
 * No-op if the crtc is not currently active.
 */
void intel_crtc_reset(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!crtc->active)
		return;

	intel_crtc_disable_planes(&crtc->base);
	dev_priv->display.crtc_disable(&crtc->base);
	dev_priv->display.crtc_enable(&crtc->base);
	intel_crtc_enable_planes(&crtc->base);
}

/*
 * Called before a GPU reset. On platforms where the reset also resets the
 * display (gen3/gen4 minus G4X), gracefully disable all active crtcs.
 *
 * NOTE: drm_modeset_lock_all() taken here is deliberately released in
 * intel_finish_reset() — the two functions must be called as a pair, and
 * the early returns below match the ones in intel_finish_reset().
 */
void intel_prepare_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		intel_crtc_disable_planes(&crtc->base);
		dev_priv->display.crtc_disable(&crtc->base);
	}
}

/*
 * Called after a GPU reset completes; counterpart of intel_prepare_reset().
 * Completes nuked page flips, and — where the reset clobbered the display —
 * re-initializes the display hardware before dropping the modeset locks.
 */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_modeset_setup_hw_state(dev, true);

	intel_hpd_init(dev_priv);

	/* Pairs with drm_modeset_lock_all() in intel_prepare_reset(). */
	drm_modeset_unlock_all(dev);
}

/*
 * Wait (uninterruptibly) for all rendering to the old framebuffer's object
 * to retire before it gets unpinned.
 */
static void
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer. Note that we rely on userspace rendering
	 * into the buffer attached to the pipe they are waiting
	 * on. If not, userspace generates a GPU hang with IPEHR
	 * point to the MI_WAIT_FOR_EVENT.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_wait_rendering(obj, true);
	dev_priv->mm.interruptible = was_interruptible;

	WARN_ON(ret);
}

/*
 * Return true if the crtc has a page flip queued (unpin_work pending).
 * Returns false during/after a GPU reset, since the reset nukes queued
 * flips anyway and waiting on them would dead-lock.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	/* unpin_work is protected by the event_lock */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}

/*
 * Fastboot only: push the native mode's pipe source size into PIPESRC and
 * disable a stale panel fitter, so a flip over the firmware's configuration
 * scans out the right surface size.
 */
static void intel_update_pipe_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode;

	if (!i915.fastboot)
		return;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */

	adjusted_mode = &crtc->config->base.adjusted_mode;

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
		   (adjusted_mode->crtc_vdisplay - 1));
	if (!crtc->config->pch_pfit.enabled &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		/* firmware may have left the panel fitter enabled */
		I915_WRITE(PF_CTL(crtc->pipe), 0);
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
	}
	crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
	crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}

/*
 * Switch the FDI link from a training pattern to the normal (idle) pattern
 * once training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock; writing the sticky bit back clears it */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock, same sticky-bit protocol as train 1 */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing / pre-emphasis levels tried in order during SNB training */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* step through the vswing/pre-emphasis table until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* same vswing walk as train 1, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* double-read: the lock bit may set between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and the (always-on) CPU FDI TX PLL. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC into the FDI RX control register */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Counterpart of ironlake_fdi_pll_enable(): tear the FDI PLLs back down. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off.
 */
	POSTING_READ(reg);
	udelay(100);
}

/*
 * Disable the CPU FDI TX and PCH FDI RX for this pipe and put the link
 * back into training pattern 1, ready for a later re-train.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/*
 * Return true if any crtc still has a framebuffer unpin outstanding;
 * if the flip itself is still pending, wait one vblank for it first.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/*
 * Finish a page flip: send the userspace event, drop the vblank
 * reference, wake waiters and queue the deferred unpin work.
 * Caller must hold dev->event_lock.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/*
 * Block until any pending page flip on this crtc has completed (with a
 * 60 second timeout after which a stuck flip is forcibly completed),
 * then wait for rendering to the current fb to retire.
 */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		lockmgr(&dev->event_lock, LK_EXCLUSIVE);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		lockmgr(&dev->event_lock, LK_RELEASE);
	}

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}

/*
 Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* all SBI (sideband interface) accesses below are serialised here */
	mutex_lock(&dev_priv->sb_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
				SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	/* re-gate the pixel clock now that programming is complete */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Copy the CPU transcoder timings into the matching PCH transcoder. */
static void ironlake_pch_transcoder_set_timings(struct
intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/*
 * Set or clear the CPT FDI B/C lane bifurcation bit. Both FDI B and C
 * receivers must be disabled while the bit is being changed.
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* already in the requested state? */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/*
 * Decide the B/C bifurcation setting for this crtc's pipe: pipe B keeps
 * all four lanes only when it needs more than two, pipe C always shares.
 */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL.
	 */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

static void
lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT has a single PCH transcoder (A) */
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}

/*
 * Drop this crtc's reference on its shared DPLL and mark the crtc as
 * owning no PLL. Warns if the PLL did not actually track this crtc.
 */
void intel_put_shared_dpll(struct intel_crtc *crtc)
{
	struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);

	if (pll == NULL)
		return;

	if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
		WARN(1, "bad %s crtc mask\n", pll->name);
		return;
	}

	pll->config.crtc_mask &= ~(1 << crtc->pipe);
	if (pll->config.crtc_mask == 0) {
		/* last user gone: the PLL must already be off and inactive */
		WARN_ON(pll->on);
		WARN_ON(pll->active);
	}

	crtc->config->shared_dpll = DPLL_ID_PRIVATE;
}

/*
 * Find a shared DPLL for the crtc: fixed pipe mapping on IBX, fixed port
 * mapping on Broxton, otherwise share a PLL with matching hw state or
 * claim a free one. Records the choice in crtc_state->shared_dpll and in
 * the PLL's staged (new_config) crtc mask. Returns NULL if none is free.
 */
struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
						struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
		i = (enum intel_dpll_id) crtc->pipe;
		pll = &dev_priv->shared_dplls[i];

		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
			      crtc->base.base.id, pll->name);

		WARN_ON(pll->new_config->crtc_mask);

		goto found;
	}

	if (IS_BROXTON(dev_priv->dev)) {
		/* PLL is attached to port in bxt */
		struct intel_encoder *encoder;
		struct intel_digital_port *intel_dig_port;

		encoder = intel_ddi_get_crtc_new_encoder(crtc_state);
		if (WARN_ON(!encoder))
			return NULL;

		intel_dig_port = enc_to_dig_port(&encoder->base);
		/* 1:1 mapping between ports and PLLs */
		i = (enum intel_dpll_id)intel_dig_port->port;
		pll = &dev_priv->shared_dplls[i];
		DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
			crtc->base.base.id, pll->name);
		WARN_ON(pll->new_config->crtc_mask);

		goto found;
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];

		/* Only want to check enabled timings first */
		if (pll->new_config->crtc_mask == 0)
			continue;

		/* share iff the staged hw state matches ours exactly */
		if (memcmp(&crtc_state->dpll_hw_state,
			   &pll->new_config->hw_state,
			   sizeof(pll->new_config->hw_state)) == 0) {
			DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n",
				      crtc->base.base.id, pll->name,
				      pll->new_config->crtc_mask,
				      pll->active);
			goto found;
		}
	}

	/* Ok no matching timings, maybe there's a free one? */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];
		if (pll->new_config->crtc_mask == 0) {
			DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
				      crtc->base.base.id, pll->name);
			goto found;
		}
	}

	return NULL;

found:
	/* first user of a freshly claimed PLL seeds its staged hw state */
	if (pll->new_config->crtc_mask == 0)
		pll->new_config->hw_state = crtc_state->dpll_hw_state;

	crtc_state->shared_dpll = i;
	DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
			 pipe_name(crtc->pipe));

	pll->new_config->crtc_mask |= 1 << crtc->pipe;

	return pll;
}

/**
 * intel_shared_dpll_start_config - start a new PLL staged config
 * @dev_priv: DRM device
 * @clear_pipes: mask of pipes that will have their PLLs freed
 *
 * Starts a new PLL staged config, copying the current config but
 * releasing the references of pipes specified in clear_pipes.
 */
static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
					  unsigned clear_pipes)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];

		pll->new_config = kmemdup(&pll->config, sizeof pll->config,
					  GFP_KERNEL);
		if (!pll->new_config)
			goto cleanup;

		pll->new_config->crtc_mask &= ~clear_pipes;
	}

	return 0;

cleanup:
	/* unwind the copies made so far; new_config stays NULL on failure */
	while (--i >= 0) {
		pll = &dev_priv->shared_dplls[i];
		kfree(pll->new_config);
		pll->new_config = NULL;
	}

	return -ENOMEM;
}

static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
{
	struct intel_shared_dpll *pll;
	enum intel_dpll_id i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		pll = &dev_priv->shared_dplls[i];

		WARN_ON(pll->new_config == &pll->config);

		pll->config = *pll->new_config;
		kfree(pll->new_config);
pll->new_config = NULL; 4347 } 4348 } 4349 4350 static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv) 4351 { 4352 struct intel_shared_dpll *pll; 4353 enum intel_dpll_id i; 4354 4355 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4356 pll = &dev_priv->shared_dplls[i]; 4357 4358 WARN_ON(pll->new_config == &pll->config); 4359 4360 kfree(pll->new_config); 4361 pll->new_config = NULL; 4362 } 4363 } 4364 4365 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4366 { 4367 struct drm_i915_private *dev_priv = dev->dev_private; 4368 int dslreg = PIPEDSL(pipe); 4369 u32 temp; 4370 4371 temp = I915_READ(dslreg); 4372 udelay(500); 4373 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4374 if (wait_for(I915_READ(dslreg) != temp, 5)) 4375 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4376 } 4377 } 4378 4379 /** 4380 * skl_update_scaler_users - Stages update to crtc's scaler state 4381 * @intel_crtc: crtc 4382 * @crtc_state: crtc_state 4383 * @plane: plane (NULL indicates crtc is requesting update) 4384 * @plane_state: plane's state 4385 * @force_detach: request unconditional detachment of scaler 4386 * 4387 * This function updates scaler state for requested plane or crtc. 4388 * To request scaler usage update for a plane, caller shall pass plane pointer. 4389 * To request scaler usage update for crtc, caller shall pass plane pointer 4390 * as NULL. 
4391 * 4392 * Return 4393 * 0 - scaler_usage updated successfully 4394 * error - requested scaling cannot be supported or other error condition 4395 */ 4396 int 4397 skl_update_scaler_users( 4398 struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state, 4399 struct intel_plane *intel_plane, struct intel_plane_state *plane_state, 4400 int force_detach) 4401 { 4402 int need_scaling; 4403 int idx; 4404 int src_w, src_h, dst_w, dst_h; 4405 int *scaler_id; 4406 struct drm_framebuffer *fb; 4407 struct intel_crtc_scaler_state *scaler_state; 4408 unsigned int rotation; 4409 4410 if (!intel_crtc || !crtc_state) 4411 return 0; 4412 4413 scaler_state = &crtc_state->scaler_state; 4414 4415 idx = intel_plane ? drm_plane_index(&intel_plane->base) : SKL_CRTC_INDEX; 4416 fb = intel_plane ? plane_state->base.fb : NULL; 4417 4418 if (intel_plane) { 4419 src_w = drm_rect_width(&plane_state->src) >> 16; 4420 src_h = drm_rect_height(&plane_state->src) >> 16; 4421 dst_w = drm_rect_width(&plane_state->dst); 4422 dst_h = drm_rect_height(&plane_state->dst); 4423 scaler_id = &plane_state->scaler_id; 4424 rotation = plane_state->base.rotation; 4425 } else { 4426 struct drm_display_mode *adjusted_mode = 4427 &crtc_state->base.adjusted_mode; 4428 src_w = crtc_state->pipe_src_w; 4429 src_h = crtc_state->pipe_src_h; 4430 dst_w = adjusted_mode->hdisplay; 4431 dst_h = adjusted_mode->vdisplay; 4432 scaler_id = &scaler_state->scaler_id; 4433 rotation = DRM_ROTATE_0; 4434 } 4435 4436 need_scaling = intel_rotation_90_or_270(rotation) ? 4437 (src_h != dst_w || src_w != dst_h): 4438 (src_w != dst_w || src_h != dst_h); 4439 4440 /* 4441 * if plane is being disabled or scaler is no more required or force detach 4442 * - free scaler binded to this plane/crtc 4443 * - in order to do this, update crtc->scaler_usage 4444 * 4445 * Here scaler state in crtc_state is set free so that 4446 * scaler can be assigned to other user. 
Actual register 4447 * update to free the scaler is done in plane/panel-fit programming. 4448 * For this purpose crtc/plane_state->scaler_id isn't reset here. 4449 */ 4450 if (force_detach || !need_scaling || (intel_plane && 4451 (!fb || !plane_state->visible))) { 4452 if (*scaler_id >= 0) { 4453 scaler_state->scaler_users &= ~(1 << idx); 4454 scaler_state->scalers[*scaler_id].in_use = 0; 4455 4456 DRM_DEBUG_KMS("Staged freeing scaler id %d.%d from %s:%d " 4457 "crtc_state = %p scaler_users = 0x%x\n", 4458 intel_crtc->pipe, *scaler_id, intel_plane ? "PLANE" : "CRTC", 4459 intel_plane ? intel_plane->base.base.id : 4460 intel_crtc->base.base.id, crtc_state, 4461 scaler_state->scaler_users); 4462 *scaler_id = -1; 4463 } 4464 return 0; 4465 } 4466 4467 /* range checks */ 4468 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H || 4469 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H || 4470 4471 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H || 4472 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) { 4473 DRM_DEBUG_KMS("%s:%d scaler_user index %u.%u: src %ux%u dst %ux%u " 4474 "size is out of scaler range\n", 4475 intel_plane ? "PLANE" : "CRTC", 4476 intel_plane ? 
intel_plane->base.base.id : intel_crtc->base.base.id, 4477 intel_crtc->pipe, idx, src_w, src_h, dst_w, dst_h); 4478 return -EINVAL; 4479 } 4480 4481 /* check colorkey */ 4482 if (WARN_ON(intel_plane && 4483 intel_plane->ckey.flags != I915_SET_COLORKEY_NONE)) { 4484 DRM_DEBUG_KMS("PLANE:%d scaling %ux%u->%ux%u not allowed with colorkey", 4485 intel_plane->base.base.id, src_w, src_h, dst_w, dst_h); 4486 return -EINVAL; 4487 } 4488 4489 /* Check src format */ 4490 if (intel_plane) { 4491 switch (fb->pixel_format) { 4492 case DRM_FORMAT_RGB565: 4493 case DRM_FORMAT_XBGR8888: 4494 case DRM_FORMAT_XRGB8888: 4495 case DRM_FORMAT_ABGR8888: 4496 case DRM_FORMAT_ARGB8888: 4497 case DRM_FORMAT_XRGB2101010: 4498 case DRM_FORMAT_XBGR2101010: 4499 case DRM_FORMAT_YUYV: 4500 case DRM_FORMAT_YVYU: 4501 case DRM_FORMAT_UYVY: 4502 case DRM_FORMAT_VYUY: 4503 break; 4504 default: 4505 DRM_DEBUG_KMS("PLANE:%d FB:%d unsupported scaling format 0x%x\n", 4506 intel_plane->base.base.id, fb->base.id, fb->pixel_format); 4507 return -EINVAL; 4508 } 4509 } 4510 4511 /* mark this plane as a scaler user in crtc_state */ 4512 scaler_state->scaler_users |= (1 << idx); 4513 DRM_DEBUG_KMS("%s:%d staged scaling request for %ux%u->%ux%u " 4514 "crtc_state = %p scaler_users = 0x%x\n", 4515 intel_plane ? "PLANE" : "CRTC", 4516 intel_plane ? 
intel_plane->base.base.id : intel_crtc->base.base.id, 4517 src_w, src_h, dst_w, dst_h, crtc_state, scaler_state->scaler_users); 4518 return 0; 4519 } 4520 4521 static void skylake_pfit_update(struct intel_crtc *crtc, int enable) 4522 { 4523 struct drm_device *dev = crtc->base.dev; 4524 struct drm_i915_private *dev_priv = dev->dev_private; 4525 int pipe = crtc->pipe; 4526 struct intel_crtc_scaler_state *scaler_state = 4527 &crtc->config->scaler_state; 4528 4529 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config); 4530 4531 /* To update pfit, first update scaler state */ 4532 skl_update_scaler_users(crtc, crtc->config, NULL, NULL, !enable); 4533 intel_atomic_setup_scalers(crtc->base.dev, crtc, crtc->config); 4534 skl_detach_scalers(crtc); 4535 if (!enable) 4536 return; 4537 4538 if (crtc->config->pch_pfit.enabled) { 4539 int id; 4540 4541 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) { 4542 DRM_ERROR("Requesting pfit without getting a scaler first\n"); 4543 return; 4544 } 4545 4546 id = scaler_state->scaler_id; 4547 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN | 4548 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode); 4549 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos); 4550 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size); 4551 4552 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id); 4553 } 4554 } 4555 4556 static void ironlake_pfit_enable(struct intel_crtc *crtc) 4557 { 4558 struct drm_device *dev = crtc->base.dev; 4559 struct drm_i915_private *dev_priv = dev->dev_private; 4560 int pipe = crtc->pipe; 4561 4562 if (crtc->config->pch_pfit.enabled) { 4563 /* Force use of hard-coded filter coefficients 4564 * as some pre-programmed values are broken, 4565 * e.g. x201. 
4566 */ 4567 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 4568 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 | 4569 PF_PIPE_SEL_IVB(pipe)); 4570 else 4571 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); 4572 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos); 4573 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size); 4574 } 4575 } 4576 4577 static void intel_enable_sprite_planes(struct drm_crtc *crtc) 4578 { 4579 struct drm_device *dev = crtc->dev; 4580 enum i915_pipe pipe = to_intel_crtc(crtc)->pipe; 4581 struct drm_plane *plane; 4582 struct intel_plane *intel_plane; 4583 4584 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { 4585 intel_plane = to_intel_plane(plane); 4586 if (intel_plane->pipe == pipe) 4587 intel_plane_restore(&intel_plane->base); 4588 } 4589 } 4590 4591 void hsw_enable_ips(struct intel_crtc *crtc) 4592 { 4593 struct drm_device *dev = crtc->base.dev; 4594 struct drm_i915_private *dev_priv = dev->dev_private; 4595 4596 if (!crtc->config->ips_enabled) 4597 return; 4598 4599 /* We can only enable IPS after we enable a plane and wait for a vblank */ 4600 intel_wait_for_vblank(dev, crtc->pipe); 4601 4602 assert_plane_enabled(dev_priv, crtc->plane); 4603 if (IS_BROADWELL(dev)) { 4604 mutex_lock(&dev_priv->rps.hw_lock); 4605 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000)); 4606 mutex_unlock(&dev_priv->rps.hw_lock); 4607 /* Quoting Art Runyan: "its not safe to expect any particular 4608 * value in IPS_CTL bit 31 after enabling IPS through the 4609 * mailbox." Moreover, the mailbox may return a bogus state, 4610 * so we need to just enable it and continue on. 4611 */ 4612 } else { 4613 I915_WRITE(IPS_CTL, IPS_ENABLE); 4614 /* The bit only becomes 1 in the next vblank, so this wait here 4615 * is essentially intel_wait_for_vblank. 
If we don't have this 4616 * and don't wait for vblanks until the end of crtc_enable, then 4617 * the HW state readout code will complain that the expected 4618 * IPS_CTL value is not the one we read. */ 4619 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) 4620 DRM_ERROR("Timed out waiting for IPS enable\n"); 4621 } 4622 } 4623 4624 void hsw_disable_ips(struct intel_crtc *crtc) 4625 { 4626 struct drm_device *dev = crtc->base.dev; 4627 struct drm_i915_private *dev_priv = dev->dev_private; 4628 4629 if (!crtc->config->ips_enabled) 4630 return; 4631 4632 assert_plane_enabled(dev_priv, crtc->plane); 4633 if (IS_BROADWELL(dev)) { 4634 mutex_lock(&dev_priv->rps.hw_lock); 4635 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4636 mutex_unlock(&dev_priv->rps.hw_lock); 4637 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4638 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) 4639 DRM_ERROR("Timed out waiting for IPS disable\n"); 4640 } else { 4641 I915_WRITE(IPS_CTL, 0); 4642 POSTING_READ(IPS_CTL); 4643 } 4644 4645 /* We need to wait for a vblank before we can disable the plane. */ 4646 intel_wait_for_vblank(dev, crtc->pipe); 4647 } 4648 4649 /** Loads the palette/gamma unit for the CRTC with the prepared values */ 4650 static void intel_crtc_load_lut(struct drm_crtc *crtc) 4651 { 4652 struct drm_device *dev = crtc->dev; 4653 struct drm_i915_private *dev_priv = dev->dev_private; 4654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4655 enum i915_pipe pipe = intel_crtc->pipe; 4656 int palreg = PALETTE(pipe); 4657 int i; 4658 bool reenable_ips = false; 4659 4660 /* The clocks have to be on to load the palette. 
*/ 4661 if (!crtc->state->enable || !intel_crtc->active) 4662 return; 4663 4664 if (HAS_GMCH_DISPLAY(dev_priv->dev)) { 4665 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4666 assert_dsi_pll_enabled(dev_priv); 4667 else 4668 assert_pll_enabled(dev_priv, pipe); 4669 } 4670 4671 /* use legacy palette for Ironlake */ 4672 if (!HAS_GMCH_DISPLAY(dev)) 4673 palreg = LGC_PALETTE(pipe); 4674 4675 /* Workaround : Do not read or write the pipe palette/gamma data while 4676 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4677 */ 4678 if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && 4679 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 4680 GAMMA_MODE_MODE_SPLIT)) { 4681 hsw_disable_ips(intel_crtc); 4682 reenable_ips = true; 4683 } 4684 4685 for (i = 0; i < 256; i++) { 4686 I915_WRITE(palreg + 4 * i, 4687 (intel_crtc->lut_r[i] << 16) | 4688 (intel_crtc->lut_g[i] << 8) | 4689 intel_crtc->lut_b[i]); 4690 } 4691 4692 if (reenable_ips) 4693 hsw_enable_ips(intel_crtc); 4694 } 4695 4696 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) 4697 { 4698 if (intel_crtc->overlay) { 4699 struct drm_device *dev = intel_crtc->base.dev; 4700 struct drm_i915_private *dev_priv = dev->dev_private; 4701 4702 mutex_lock(&dev->struct_mutex); 4703 dev_priv->mm.interruptible = false; 4704 (void) intel_overlay_switch_off(intel_crtc->overlay); 4705 dev_priv->mm.interruptible = true; 4706 mutex_unlock(&dev->struct_mutex); 4707 } 4708 4709 /* Let userspace switch the overlay on again. In most cases userspace 4710 * has to recompute where to put it anyway. 4711 */ 4712 } 4713 4714 /** 4715 * intel_post_enable_primary - Perform operations after enabling primary plane 4716 * @crtc: the CRTC whose primary plane was just enabled 4717 * 4718 * Performs potentially sleeping operations that must be done after the primary 4719 * plane is enabled, such as updating FBC and IPS. 
Note that this may be 4720 * called due to an explicit primary plane update, or due to an implicit 4721 * re-enable that is caused when a sprite plane is updated to no longer 4722 * completely hide the primary plane. 4723 */ 4724 static void 4725 intel_post_enable_primary(struct drm_crtc *crtc) 4726 { 4727 struct drm_device *dev = crtc->dev; 4728 struct drm_i915_private *dev_priv = dev->dev_private; 4729 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4730 int pipe = intel_crtc->pipe; 4731 4732 /* 4733 * BDW signals flip done immediately if the plane 4734 * is disabled, even if the plane enable is already 4735 * armed to occur at the next vblank :( 4736 */ 4737 if (IS_BROADWELL(dev)) 4738 intel_wait_for_vblank(dev, pipe); 4739 4740 /* 4741 * FIXME IPS should be fine as long as one plane is 4742 * enabled, but in practice it seems to have problems 4743 * when going from primary only to sprite only and vice 4744 * versa. 4745 */ 4746 hsw_enable_ips(intel_crtc); 4747 4748 mutex_lock(&dev->struct_mutex); 4749 intel_fbc_update(dev); 4750 mutex_unlock(&dev->struct_mutex); 4751 4752 /* 4753 * Gen2 reports pipe underruns whenever all planes are disabled. 4754 * So don't enable underrun reporting before at least some planes 4755 * are enabled. 4756 * FIXME: Need to fix the logic to work when we turn off all planes 4757 * but leave the pipe running. 4758 */ 4759 if (IS_GEN2(dev)) 4760 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4761 4762 /* Underruns don't raise interrupts, so check manually. */ 4763 if (HAS_GMCH_DISPLAY(dev)) 4764 i9xx_check_fifo_underruns(dev_priv); 4765 } 4766 4767 /** 4768 * intel_pre_disable_primary - Perform operations before disabling primary plane 4769 * @crtc: the CRTC whose primary plane is to be disabled 4770 * 4771 * Performs potentially sleeping operations that must be done before the 4772 * primary plane is disabled, such as updating FBC and IPS. 
Note that this may 4773 * be called due to an explicit primary plane update, or due to an implicit 4774 * disable that is caused when a sprite plane completely hides the primary 4775 * plane. 4776 */ 4777 static void 4778 intel_pre_disable_primary(struct drm_crtc *crtc) 4779 { 4780 struct drm_device *dev = crtc->dev; 4781 struct drm_i915_private *dev_priv = dev->dev_private; 4782 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4783 int pipe = intel_crtc->pipe; 4784 4785 /* 4786 * Gen2 reports pipe underruns whenever all planes are disabled. 4787 * So diasble underrun reporting before all the planes get disabled. 4788 * FIXME: Need to fix the logic to work when we turn off all planes 4789 * but leave the pipe running. 4790 */ 4791 if (IS_GEN2(dev)) 4792 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 4793 4794 /* 4795 * Vblank time updates from the shadow to live plane control register 4796 * are blocked if the memory self-refresh mode is active at that 4797 * moment. So to make sure the plane gets truly disabled, disable 4798 * first the self-refresh mode. The self-refresh enable bit in turn 4799 * will be checked/applied by the HW only at the next frame start 4800 * event which is after the vblank start event, so we need to have a 4801 * wait-for-vblank between disabling the plane and the pipe. 4802 */ 4803 if (HAS_GMCH_DISPLAY(dev)) 4804 intel_set_memory_cxsr(dev_priv, false); 4805 4806 mutex_lock(&dev->struct_mutex); 4807 if (dev_priv->fbc.crtc == intel_crtc) 4808 intel_fbc_disable(dev); 4809 mutex_unlock(&dev->struct_mutex); 4810 4811 /* 4812 * FIXME IPS should be fine as long as one plane is 4813 * enabled, but in practice it seems to have problems 4814 * when going from primary only to sprite only and vice 4815 * versa. 
4816 */ 4817 hsw_disable_ips(intel_crtc); 4818 } 4819 4820 static void intel_crtc_enable_planes(struct drm_crtc *crtc) 4821 { 4822 struct drm_device *dev = crtc->dev; 4823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4824 int pipe = intel_crtc->pipe; 4825 4826 intel_enable_primary_hw_plane(crtc->primary, crtc); 4827 intel_enable_sprite_planes(crtc); 4828 intel_crtc_update_cursor(crtc, true); 4829 4830 intel_post_enable_primary(crtc); 4831 4832 /* 4833 * FIXME: Once we grow proper nuclear flip support out of this we need 4834 * to compute the mask of flip planes precisely. For the time being 4835 * consider this a flip to a NULL plane. 4836 */ 4837 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4838 } 4839 4840 static void intel_crtc_disable_planes(struct drm_crtc *crtc) 4841 { 4842 struct drm_device *dev = crtc->dev; 4843 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4844 struct intel_plane *intel_plane; 4845 int pipe = intel_crtc->pipe; 4846 4847 if (!intel_crtc->active) 4848 return; 4849 4850 intel_crtc_wait_for_pending_flips(crtc); 4851 4852 intel_pre_disable_primary(crtc); 4853 4854 intel_crtc_dpms_overlay_disable(intel_crtc); 4855 for_each_intel_plane(dev, intel_plane) { 4856 if (intel_plane->pipe == pipe) { 4857 struct drm_crtc *from = intel_plane->base.crtc; 4858 4859 intel_plane->disable_plane(&intel_plane->base, 4860 from ?: crtc, true); 4861 } 4862 } 4863 4864 /* 4865 * FIXME: Once we grow proper nuclear flip support out of this we need 4866 * to compute the mask of flip planes precisely. For the time being 4867 * consider this a flip to a NULL plane. 
4868 */ 4869 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4870 } 4871 4872 static void ironlake_crtc_enable(struct drm_crtc *crtc) 4873 { 4874 struct drm_device *dev = crtc->dev; 4875 struct drm_i915_private *dev_priv = dev->dev_private; 4876 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4877 struct intel_encoder *encoder; 4878 int pipe = intel_crtc->pipe; 4879 4880 WARN_ON(!crtc->state->enable); 4881 4882 if (intel_crtc->active) 4883 return; 4884 4885 if (intel_crtc->config->has_pch_encoder) 4886 intel_prepare_shared_dpll(intel_crtc); 4887 4888 if (intel_crtc->config->has_dp_encoder) 4889 intel_dp_set_m_n(intel_crtc, M1_N1); 4890 4891 intel_set_pipe_timings(intel_crtc); 4892 4893 if (intel_crtc->config->has_pch_encoder) { 4894 intel_cpu_transcoder_set_m_n(intel_crtc, 4895 &intel_crtc->config->fdi_m_n, NULL); 4896 } 4897 4898 ironlake_set_pipeconf(crtc); 4899 4900 intel_crtc->active = true; 4901 4902 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4903 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4904 4905 for_each_encoder_on_crtc(dev, crtc, encoder) 4906 if (encoder->pre_enable) 4907 encoder->pre_enable(encoder); 4908 4909 if (intel_crtc->config->has_pch_encoder) { 4910 /* Note: FDI PLL enabling _must_ be done before we enable the 4911 * cpu pipes, hence this is separate from all the other fdi/pch 4912 * enabling. 
*/ 4913 ironlake_fdi_pll_enable(intel_crtc); 4914 } else { 4915 assert_fdi_tx_disabled(dev_priv, pipe); 4916 assert_fdi_rx_disabled(dev_priv, pipe); 4917 } 4918 4919 ironlake_pfit_enable(intel_crtc); 4920 4921 /* 4922 * On ILK+ LUT must be loaded before the pipe is running but with 4923 * clocks enabled 4924 */ 4925 intel_crtc_load_lut(crtc); 4926 4927 intel_update_watermarks(crtc); 4928 intel_enable_pipe(intel_crtc); 4929 4930 if (intel_crtc->config->has_pch_encoder) 4931 ironlake_pch_enable(crtc); 4932 4933 assert_vblank_disabled(crtc); 4934 drm_crtc_vblank_on(crtc); 4935 4936 for_each_encoder_on_crtc(dev, crtc, encoder) 4937 encoder->enable(encoder); 4938 4939 if (HAS_PCH_CPT(dev)) 4940 cpt_verify_modeset(dev, intel_crtc->pipe); 4941 } 4942 4943 /* IPS only exists on ULT machines and is tied to pipe A. */ 4944 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 4945 { 4946 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 4947 } 4948 4949 /* 4950 * This implements the workaround described in the "notes" section of the mode 4951 * set sequence documentation. When going from no pipes or single pipe to 4952 * multiple pipes, and planes are enabled after the pipe, we need to wait at 4953 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 4954 */ 4955 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc) 4956 { 4957 struct drm_device *dev = crtc->base.dev; 4958 struct intel_crtc *crtc_it, *other_active_crtc = NULL; 4959 4960 /* We want to get the other_active_crtc only if there's only 1 other 4961 * active crtc. 
*/ 4962 for_each_intel_crtc(dev, crtc_it) { 4963 if (!crtc_it->active || crtc_it == crtc) 4964 continue; 4965 4966 if (other_active_crtc) 4967 return; 4968 4969 other_active_crtc = crtc_it; 4970 } 4971 if (!other_active_crtc) 4972 return; 4973 4974 intel_wait_for_vblank(dev, other_active_crtc->pipe); 4975 intel_wait_for_vblank(dev, other_active_crtc->pipe); 4976 } 4977 4978 static void haswell_crtc_enable(struct drm_crtc *crtc) 4979 { 4980 struct drm_device *dev = crtc->dev; 4981 struct drm_i915_private *dev_priv = dev->dev_private; 4982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4983 struct intel_encoder *encoder; 4984 int pipe = intel_crtc->pipe; 4985 4986 WARN_ON(!crtc->state->enable); 4987 4988 if (intel_crtc->active) 4989 return; 4990 4991 if (intel_crtc_to_shared_dpll(intel_crtc)) 4992 intel_enable_shared_dpll(intel_crtc); 4993 4994 if (intel_crtc->config->has_dp_encoder) 4995 intel_dp_set_m_n(intel_crtc, M1_N1); 4996 4997 intel_set_pipe_timings(intel_crtc); 4998 4999 if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) { 5000 I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder), 5001 intel_crtc->config->pixel_multiplier - 1); 5002 } 5003 5004 if (intel_crtc->config->has_pch_encoder) { 5005 intel_cpu_transcoder_set_m_n(intel_crtc, 5006 &intel_crtc->config->fdi_m_n, NULL); 5007 } 5008 5009 haswell_set_pipeconf(crtc); 5010 5011 intel_set_pipe_csc(crtc); 5012 5013 intel_crtc->active = true; 5014 5015 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5016 for_each_encoder_on_crtc(dev, crtc, encoder) 5017 if (encoder->pre_enable) 5018 encoder->pre_enable(encoder); 5019 5020 if (intel_crtc->config->has_pch_encoder) { 5021 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 5022 true); 5023 dev_priv->display.fdi_link_train(crtc); 5024 } 5025 5026 intel_ddi_enable_pipe_clock(intel_crtc); 5027 5028 if (INTEL_INFO(dev)->gen == 9) 5029 skylake_pfit_update(intel_crtc, 1); 5030 else if (INTEL_INFO(dev)->gen < 9) 5031 
ironlake_pfit_enable(intel_crtc); 5032 else 5033 MISSING_CASE(INTEL_INFO(dev)->gen); 5034 5035 /* 5036 * On ILK+ LUT must be loaded before the pipe is running but with 5037 * clocks enabled 5038 */ 5039 intel_crtc_load_lut(crtc); 5040 5041 intel_ddi_set_pipe_settings(crtc); 5042 intel_ddi_enable_transcoder_func(crtc); 5043 5044 intel_update_watermarks(crtc); 5045 intel_enable_pipe(intel_crtc); 5046 5047 if (intel_crtc->config->has_pch_encoder) 5048 lpt_pch_enable(crtc); 5049 5050 if (intel_crtc->config->dp_encoder_is_mst) 5051 intel_ddi_set_vc_payload_alloc(crtc, true); 5052 5053 assert_vblank_disabled(crtc); 5054 drm_crtc_vblank_on(crtc); 5055 5056 for_each_encoder_on_crtc(dev, crtc, encoder) { 5057 encoder->enable(encoder); 5058 intel_opregion_notify_encoder(encoder, true); 5059 } 5060 5061 /* If we change the relative order between pipe/planes enabling, we need 5062 * to change the workaround. */ 5063 haswell_mode_set_planes_workaround(intel_crtc); 5064 } 5065 5066 static void ironlake_pfit_disable(struct intel_crtc *crtc) 5067 { 5068 struct drm_device *dev = crtc->base.dev; 5069 struct drm_i915_private *dev_priv = dev->dev_private; 5070 int pipe = crtc->pipe; 5071 5072 /* To avoid upsetting the power well on haswell only disable the pfit if 5073 * it's in use. The hw state code will make sure we get this right. 
*/ 5074 if (crtc->config->pch_pfit.enabled) { 5075 I915_WRITE(PF_CTL(pipe), 0); 5076 I915_WRITE(PF_WIN_POS(pipe), 0); 5077 I915_WRITE(PF_WIN_SZ(pipe), 0); 5078 } 5079 } 5080 5081 static void ironlake_crtc_disable(struct drm_crtc *crtc) 5082 { 5083 struct drm_device *dev = crtc->dev; 5084 struct drm_i915_private *dev_priv = dev->dev_private; 5085 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5086 struct intel_encoder *encoder; 5087 int pipe = intel_crtc->pipe; 5088 u32 reg, temp; 5089 5090 if (!intel_crtc->active) 5091 return; 5092 5093 for_each_encoder_on_crtc(dev, crtc, encoder) 5094 encoder->disable(encoder); 5095 5096 drm_crtc_vblank_off(crtc); 5097 assert_vblank_disabled(crtc); 5098 5099 if (intel_crtc->config->has_pch_encoder) 5100 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 5101 5102 intel_disable_pipe(intel_crtc); 5103 5104 ironlake_pfit_disable(intel_crtc); 5105 5106 if (intel_crtc->config->has_pch_encoder) 5107 ironlake_fdi_disable(crtc); 5108 5109 for_each_encoder_on_crtc(dev, crtc, encoder) 5110 if (encoder->post_disable) 5111 encoder->post_disable(encoder); 5112 5113 if (intel_crtc->config->has_pch_encoder) { 5114 ironlake_disable_pch_transcoder(dev_priv, pipe); 5115 5116 if (HAS_PCH_CPT(dev)) { 5117 /* disable TRANS_DP_CTL */ 5118 reg = TRANS_DP_CTL(pipe); 5119 temp = I915_READ(reg); 5120 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 5121 TRANS_DP_PORT_SEL_MASK); 5122 temp |= TRANS_DP_PORT_SEL_NONE; 5123 I915_WRITE(reg, temp); 5124 5125 /* disable DPLL_SEL */ 5126 temp = I915_READ(PCH_DPLL_SEL); 5127 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 5128 I915_WRITE(PCH_DPLL_SEL, temp); 5129 } 5130 5131 /* disable PCH DPLL */ 5132 intel_disable_shared_dpll(intel_crtc); 5133 5134 ironlake_fdi_pll_disable(intel_crtc); 5135 } 5136 5137 intel_crtc->active = false; 5138 intel_update_watermarks(crtc); 5139 5140 mutex_lock(&dev->struct_mutex); 5141 intel_fbc_update(dev); 5142 mutex_unlock(&dev->struct_mutex); 5143 } 5144 5145 static 
/*
 * Full disable sequence for a HSW/BDW/SKL DDI CRTC: encoders (with opregion
 * notification), vblank, pipe, transcoder, panel fitter, DDI pipe clock and
 * finally — if present — the LPT PCH transcoder/FDI and the shared DPLL.
 * The ordering follows the required hardware shutdown sequence.
 * (The 'static' storage-class keyword precedes this line.)
 */
void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* Nothing to do if the pipe is already down. */
	if (!intel_crtc->active)
		return;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* On LPT the PCH transcoder is always fed from transcoder A. */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);
	intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	/* Gen 9 uses the scaler-based pfit; older gens the ILK-style one. */
	if (INTEL_INFO(dev)->gen == 9)
		skylake_pfit_update(intel_crtc, 0);
	else if (INTEL_INFO(dev)->gen < 9)
		ironlake_pfit_disable(intel_crtc);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	intel_ddi_disable_pipe_clock(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		intel_ddi_fdi_disable(crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	/* FBC state is protected by struct_mutex. */
	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);

	if (intel_crtc_to_shared_dpll(intel_crtc))
		intel_disable_shared_dpll(intel_crtc);
}

/* ->off() hook for ILK-class CRTCs: drop the shared DPLL reference. */
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	intel_put_shared_dpll(intel_crtc);
}


/*
 * Program the GMCH panel fitter from the precomputed pipe config
 * (continues below).
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config = crtc->config;

	/* A zero control word means the pfit is unused for this config. */
	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}

/*
 * Map a DDI port to the power domain that must be held while the port
 * is in use. Unknown ports warn once and fall back to PORT_OTHER.
 */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_4_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_4_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_4_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_4_LANES;
	default:
		WARN_ON_ONCE(1);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/* Iterate over every power domain whose bit is set in @mask. */
#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))

/*
 * Return the power domain an encoder's output port depends on,
 * derived from the encoder type (and, for digital ports, the port).
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fallthrough - UNKNOWN is treated as a digital port */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}

/*
 * Compute the bitmask of power domains the given CRTC needs: its pipe,
 * its CPU transcoder, optionally the panel fitter, and the domains of
 * every encoder currently attached to it.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder;

	transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe);

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (intel_crtc->config->pch_pfit.enabled ||
	    intel_crtc->config->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		mask |= BIT(intel_display_port_power_domain(intel_encoder));

	return mask;
}

/*
 * Re-evaluate and re-take the power-domain references for all CRTCs
 * after a modeset (continues below).
 */
static void modeset_update_crtc_power_domains(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long pipe_domains[I915_MAX_PIPES] = { 0, };
	struct intel_crtc *crtc;

	/*
	 * First get all needed power domains, then put all unneeded, to avoid
	 * any unnecessary toggling of the power wells.
5312 */ 5313 for_each_intel_crtc(dev, crtc) { 5314 enum intel_display_power_domain domain; 5315 5316 if (!crtc->base.state->enable) 5317 continue; 5318 5319 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); 5320 5321 for_each_power_domain(domain, pipe_domains[crtc->pipe]) 5322 intel_display_power_get(dev_priv, domain); 5323 } 5324 5325 if (dev_priv->display.modeset_global_resources) 5326 dev_priv->display.modeset_global_resources(state); 5327 5328 for_each_intel_crtc(dev, crtc) { 5329 enum intel_display_power_domain domain; 5330 5331 for_each_power_domain(domain, crtc->enabled_power_domains) 5332 intel_display_power_put(dev_priv, domain); 5333 5334 crtc->enabled_power_domains = pipe_domains[crtc->pipe]; 5335 } 5336 5337 intel_display_set_init_power(dev_priv, false); 5338 } 5339 5340 void broxton_set_cdclk(struct drm_device *dev, int frequency) 5341 { 5342 struct drm_i915_private *dev_priv = dev->dev_private; 5343 uint32_t divider; 5344 uint32_t ratio; 5345 uint32_t current_freq; 5346 int ret; 5347 5348 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5349 switch (frequency) { 5350 case 144000: 5351 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5352 ratio = BXT_DE_PLL_RATIO(60); 5353 break; 5354 case 288000: 5355 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5356 ratio = BXT_DE_PLL_RATIO(60); 5357 break; 5358 case 384000: 5359 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5360 ratio = BXT_DE_PLL_RATIO(60); 5361 break; 5362 case 576000: 5363 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5364 ratio = BXT_DE_PLL_RATIO(60); 5365 break; 5366 case 624000: 5367 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5368 ratio = BXT_DE_PLL_RATIO(65); 5369 break; 5370 case 19200: 5371 /* 5372 * Bypass frequency with DE PLL disabled. Init ratio, divider 5373 * to suppress GCC warning. 
5374 */ 5375 ratio = 0; 5376 divider = 0; 5377 break; 5378 default: 5379 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5380 5381 return; 5382 } 5383 5384 mutex_lock(&dev_priv->rps.hw_lock); 5385 /* Inform power controller of upcoming frequency change */ 5386 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5387 0x80000000); 5388 mutex_unlock(&dev_priv->rps.hw_lock); 5389 5390 if (ret) { 5391 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5392 ret, frequency); 5393 return; 5394 } 5395 5396 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5397 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5398 current_freq = current_freq * 500 + 1000; 5399 5400 /* 5401 * DE PLL has to be disabled when 5402 * - setting to 19.2MHz (bypass, PLL isn't used) 5403 * - before setting to 624MHz (PLL needs toggling) 5404 * - before setting to any frequency from 624MHz (PLL needs toggling) 5405 */ 5406 if (frequency == 19200 || frequency == 624000 || 5407 current_freq == 624000) { 5408 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE); 5409 /* Timeout 200us */ 5410 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK), 5411 1)) 5412 DRM_ERROR("timout waiting for DE PLL unlock\n"); 5413 } 5414 5415 if (frequency != 19200) { 5416 uint32_t val; 5417 5418 val = I915_READ(BXT_DE_PLL_CTL); 5419 val &= ~BXT_DE_PLL_RATIO_MASK; 5420 val |= ratio; 5421 I915_WRITE(BXT_DE_PLL_CTL, val); 5422 5423 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE); 5424 /* Timeout 200us */ 5425 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1)) 5426 DRM_ERROR("timeout waiting for DE PLL lock\n"); 5427 5428 val = I915_READ(CDCLK_CTL); 5429 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK; 5430 val |= divider; 5431 /* 5432 * Disable SSA Precharge when CD clock frequency < 500 MHz, 5433 * enable otherwise. 
5434 */ 5435 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5436 if (frequency >= 500000) 5437 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; 5438 5439 val &= ~CDCLK_FREQ_DECIMAL_MASK; 5440 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5441 val |= (frequency - 1000) / 500; 5442 I915_WRITE(CDCLK_CTL, val); 5443 } 5444 5445 mutex_lock(&dev_priv->rps.hw_lock); 5446 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5447 DIV_ROUND_UP(frequency, 25000)); 5448 mutex_unlock(&dev_priv->rps.hw_lock); 5449 5450 if (ret) { 5451 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5452 ret, frequency); 5453 return; 5454 } 5455 5456 dev_priv->cdclk_freq = frequency; 5457 } 5458 5459 void broxton_init_cdclk(struct drm_device *dev) 5460 { 5461 struct drm_i915_private *dev_priv = dev->dev_private; 5462 uint32_t val; 5463 5464 /* 5465 * NDE_RSTWRN_OPT RST PCH Handshake En must always be 0b on BXT 5466 * or else the reset will hang because there is no PCH to respond. 5467 * Move the handshake programming to initialization sequence. 5468 * Previously was left up to BIOS. 5469 */ 5470 val = I915_READ(HSW_NDE_RSTWRN_OPT); 5471 val &= ~RESET_PCH_HANDSHAKE_ENABLE; 5472 I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 5473 5474 /* Enable PG1 for cdclk */ 5475 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 5476 5477 /* check if cd clock is enabled */ 5478 if (I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE) { 5479 DRM_DEBUG_KMS("Display already initialized\n"); 5480 return; 5481 } 5482 5483 /* 5484 * FIXME: 5485 * - The initial CDCLK needs to be read from VBT. 5486 * Need to make this change after VBT has changes for BXT. 
5487 * - check if setting the max (or any) cdclk freq is really necessary 5488 * here, it belongs to modeset time 5489 */ 5490 broxton_set_cdclk(dev, 624000); 5491 5492 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5493 POSTING_READ(DBUF_CTL); 5494 5495 udelay(10); 5496 5497 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5498 DRM_ERROR("DBuf power enable timeout!\n"); 5499 } 5500 5501 void broxton_uninit_cdclk(struct drm_device *dev) 5502 { 5503 struct drm_i915_private *dev_priv = dev->dev_private; 5504 5505 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5506 POSTING_READ(DBUF_CTL); 5507 5508 udelay(10); 5509 5510 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5511 DRM_ERROR("DBuf power disable timeout!\n"); 5512 5513 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5514 broxton_set_cdclk(dev, 19200); 5515 5516 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5517 } 5518 5519 static const struct skl_cdclk_entry { 5520 unsigned int freq; 5521 unsigned int vco; 5522 } skl_cdclk_frequencies[] = { 5523 { .freq = 308570, .vco = 8640 }, 5524 { .freq = 337500, .vco = 8100 }, 5525 { .freq = 432000, .vco = 8640 }, 5526 { .freq = 450000, .vco = 8100 }, 5527 { .freq = 540000, .vco = 8100 }, 5528 { .freq = 617140, .vco = 8640 }, 5529 { .freq = 675000, .vco = 8100 }, 5530 }; 5531 5532 static unsigned int skl_cdclk_decimal(unsigned int freq) 5533 { 5534 return (freq - 1000) / 500; 5535 } 5536 5537 static unsigned int skl_cdclk_get_vco(unsigned int freq) 5538 { 5539 unsigned int i; 5540 5541 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) { 5542 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i]; 5543 5544 if (e->freq == freq) 5545 return e->vco; 5546 } 5547 5548 return 8100; 5549 } 5550 5551 static void 5552 skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5553 { 5554 unsigned int min_freq; 5555 u32 val; 5556 5557 /* select the minimum CDCLK before enabling DPLL 0 */ 5558 
val = I915_READ(CDCLK_CTL); 5559 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK; 5560 val |= CDCLK_FREQ_337_308; 5561 5562 if (required_vco == 8640) 5563 min_freq = 308570; 5564 else 5565 min_freq = 337500; 5566 5567 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq); 5568 5569 I915_WRITE(CDCLK_CTL, val); 5570 POSTING_READ(CDCLK_CTL); 5571 5572 /* 5573 * We always enable DPLL0 with the lowest link rate possible, but still 5574 * taking into account the VCO required to operate the eDP panel at the 5575 * desired frequency. The usual DP link rates operate with a VCO of 5576 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5577 * The modeset code is responsible for the selection of the exact link 5578 * rate later on, with the constraint of choosing a frequency that 5579 * works with required_vco. 5580 */ 5581 val = I915_READ(DPLL_CTRL1); 5582 5583 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5584 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5585 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5586 if (required_vco == 8640) 5587 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5588 SKL_DPLL0); 5589 else 5590 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 5591 SKL_DPLL0); 5592 5593 I915_WRITE(DPLL_CTRL1, val); 5594 POSTING_READ(DPLL_CTRL1); 5595 5596 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5597 5598 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5599 DRM_ERROR("DPLL0 not locked\n"); 5600 } 5601 5602 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5603 { 5604 int ret; 5605 u32 val; 5606 5607 /* inform PCU we want to change CDCLK */ 5608 val = SKL_CDCLK_PREPARE_FOR_CHANGE; 5609 mutex_lock(&dev_priv->rps.hw_lock); 5610 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val); 5611 mutex_unlock(&dev_priv->rps.hw_lock); 5612 5613 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE); 5614 } 5615 5616 static bool skl_cdclk_wait_for_pcu_ready(struct 
drm_i915_private *dev_priv)
{
	unsigned int i;

	/* Poll pcode readiness, up to 15 tries with 10us between them. */
	for (i = 0; i < 15; i++) {
		if (skl_cdclk_pcu_ready(dev_priv))
			return true;
		udelay(10);
	}

	return false;
}

/*
 * Program the SKL CD clock to @freq (kHz): wait for pcode readiness,
 * write CDCLK_CTL, then send the matching ack value back to pcode.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
{
	u32 freq_select, pcu_ack;

	DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch(freq) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	/*
	 * Note: default: is deliberately grouped with the 337/308 cases
	 * in the middle of the switch — unknown frequencies select the
	 * lowest bin. The 617/675 cases below are still reachable.
	 */
	case 308570:
	case 337500:
	default:
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617140:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);
}

/*
 * SKL display clock teardown: power down the DBUF, disable DPLL0 and
 * release the PLL power domain taken in skl_init_cdclk().
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	/* disable DPLL0 */
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
}

/*
 * One-time SKL display clock bring-up: enable the PCH reset handshake,
 * take the PLL power domain, enable DPLL0 for the BIOS-chosen CDCLK and
 * power up the DBUF (continues below).
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	u32 val;
	unsigned int required_vco;

	/* enable PCH reset handshake */
	val = I915_READ(HSW_NDE_RSTWRN_OPT);
	I915_WRITE(HSW_NDE_RSTWRN_OPT, val | RESET_PCH_HANDSHAKE_ENABLE);

	/* enable PG1 and Misc I/O */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);

	/* DPLL0 already enabled !? */
	if (I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE) {
		DRM_DEBUG_DRIVER("DPLL0 already running\n");
		return;
	}

	/* enable DPLL0 */
	required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
	skl_dpll0_enable(dev_priv, required_vco);

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	/* The fuse value indexes the SKU table above (MHz -> kHz). */
	return vco_freq[hpll_freq] * 1000;
}

/*
 * Refresh the cached CDCLK frequency from hardware and reprogram the
 * GMBUS clock divider to match.
 */
static void vlv_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	/*
	 * Program the gmbus_freq based on the cdclk frequency.
	 * BSpec erroneously claims we should aim for 4MHz, but
	 * in fact 1MHz is the correct frequency.
	 */
	I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}

/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
		!= dev_priv->cdclk_freq);

	/* Pick the Punit voltage command for the requested frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	/* Request the new voltage/frequency point from the Punit. */
	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	/* 400MHz additionally needs the CCK divider reprogrammed. */
	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~DISPLAY_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	/* Refresh the cached frequency and dependent GMBUS divider. */
	vlv_update_cdclk(dev);
}

/*
 * CHV variant of the CDCLK change: only a fixed set of frequencies is
 * supported, and only the Punit divider needs programming.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
		!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	vlv_update_cdclk(dev);
}

/*
 * Pick the smallest supported VLV/CHV CDCLK (kHz) that can drive
 * @max_pixclk, with a 90% (VLV) / 95% (CHV) guardband.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}

/*
 * Pick the smallest supported BXT CDCLK (kHz) that can drive @max_pixclk,
 * with a fixed 90% guardband.
 */
static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
			      int max_pixclk)
{
	/*
	 * FIXME:
	 * - remove the guardband, it's not needed on BXT
	 * - set 19.2MHz bypass frequency if there are no active pipes
	 */
	if (max_pixclk > 576000*9/10)
		return 624000;
	else if (max_pixclk > 384000*9/10)
		return 576000;
	else if (max_pixclk > 288000*9/10)
		return 384000;
	else if (max_pixclk > 144000*9/10)
		return 288000;
	else
		return 144000;
}

/* Compute the max pixel clock for new configuration. Uses atomic state if
 * that's non-NULL, look at current state otherwise.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	int max_pixclk = 0;

	for_each_intel_crtc(dev, intel_crtc) {
		if (state)
			crtc_state =
				intel_atomic_get_crtc_state(state, intel_crtc);
		else
			crtc_state = intel_crtc->config;
		/* intel_atomic_get_crtc_state() may return ERR_PTR. */
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->base.enable)
			continue;

		max_pixclk = max(max_pixclk,
				 crtc_state->base.adjusted_mode.crtc_clock);
	}

	return max_pixclk;
}

/*
 * If the new configuration requires a CDCLK change, pull every active
 * pipe into the atomic state and flag it for a full modeset so all pipes
 * are cycled while the clock is reprogrammed.  Returns 0 or a negative
 * error code.
 */
static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int max_pixclk = intel_mode_max_pixclk(state->dev, state);
	int cdclk, i;

	/* Negative value is an error from intel_mode_max_pixclk(). */
	if (max_pixclk < 0)
		return max_pixclk;

	if (IS_VALLEYVIEW(dev_priv))
		cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
	else
		cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);

	/* No clock change needed -> nothing to add to the state. */
	if (cdclk == dev_priv->cdclk_freq)
		return 0;

	/* add all active pipes to the state */
	for_each_crtc(state->dev, crtc) {
		if (!crtc->state->enable)
			continue;

		crtc_state = drm_atomic_get_crtc_state(state, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
	}

	/* disable/enable all currently active pipes while we change cdclk */
	for_each_crtc_in_state(state, crtc, crtc_state, i)
		if (crtc_state->enable)
			crtc_state->mode_changed = true;

	return 0;
}

/*
 * Program the PFI (memory arbiter) credits appropriate for the current
 * CDCLK vs CZ clock ratio on VLV/CHV.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_31;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}

/*
 * modeset_global_resources hook for VLV/CHV: recompute the required CDCLK
 * from the (already committed) state and reprogram it if needed.
 */
static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
	int req_cdclk;

	/* The path in intel_mode_max_pixclk() with a NULL atomic state should
	 * never fail. */
	if (WARN_ON(max_pixclk < 0))
		return;

	req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->cdclk_freq) {
		/*
		 * FIXME: We can end up here with all power domains off, yet
		 * with a CDCLK frequency other than the minimum. To account
		 * for this take the PIPE-A power domain, which covers the HW
		 * blocks needed for the following programming. This can be
		 * removed once it's guaranteed that we get here either with
		 * the minimum CDCLK set, or the required power domains
		 * enabled.
		 */
		intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

		if (IS_CHERRYVIEW(dev))
			cherryview_set_cdclk(dev, req_cdclk);
		else
			valleyview_set_cdclk(dev, req_cdclk);

		vlv_program_pfi_credits(dev_priv);

		intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
	}
}

/*
 * Full enable sequence for a VLV/CHV CRTC. DSI outputs bypass the
 * display PLL entirely; everything else programs and enables the
 * VLV or CHV PLL around the encoder pre-enable hooks. The register
 * write ordering below follows the required hardware bring-up sequence.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	bool is_dsi;

	WARN_ON(!crtc->state->enable);

	/* Already up: nothing to do. */
	if (intel_crtc->active)
		return;

	is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);

	/* DSI drives the pipe without the display PLL. */
	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_prepare_pll(intel_crtc, intel_crtc->config);
		else
			vlv_prepare_pll(intel_crtc, intel_crtc->config);
	}

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	/* CHV pipe B needs its blender put into legacy mode. */
	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (!is_dsi) {
		if (IS_CHERRYVIEW(dev))
			chv_enable_pll(intel_crtc, intel_crtc->config);
		else
			vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}

/* Write the precomputed FP0/FP1 PLL divider values for @crtc's pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

/*
 * Full enable sequence for a pre-ILK (i9xx) CRTC. Ordering follows the
 * required hardware bring-up sequence.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->state->enable);

	/* Already up: nothing to do. */
	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	/* Gen2 has no FIFO underrun reporting. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}

/*
 * Turn off the GMCH panel fitter, if it was in use for this config
 * (continues below).
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
6184 return; 6185 6186 assert_pipe_disabled(dev_priv, crtc->pipe); 6187 6188 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n", 6189 I915_READ(PFIT_CONTROL)); 6190 I915_WRITE(PFIT_CONTROL, 0); 6191 } 6192 6193 static void i9xx_crtc_disable(struct drm_crtc *crtc) 6194 { 6195 struct drm_device *dev = crtc->dev; 6196 struct drm_i915_private *dev_priv = dev->dev_private; 6197 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6198 struct intel_encoder *encoder; 6199 int pipe = intel_crtc->pipe; 6200 6201 if (!intel_crtc->active) 6202 return; 6203 6204 /* 6205 * On gen2 planes are double buffered but the pipe isn't, so we must 6206 * wait for planes to fully turn off before disabling the pipe. 6207 * We also need to wait on all gmch platforms because of the 6208 * self-refresh mode constraint explained above. 6209 */ 6210 intel_wait_for_vblank(dev, pipe); 6211 6212 for_each_encoder_on_crtc(dev, crtc, encoder) 6213 encoder->disable(encoder); 6214 6215 drm_crtc_vblank_off(crtc); 6216 assert_vblank_disabled(crtc); 6217 6218 intel_disable_pipe(intel_crtc); 6219 6220 i9xx_pfit_disable(intel_crtc); 6221 6222 for_each_encoder_on_crtc(dev, crtc, encoder) 6223 if (encoder->post_disable) 6224 encoder->post_disable(encoder); 6225 6226 if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) { 6227 if (IS_CHERRYVIEW(dev)) 6228 chv_disable_pll(dev_priv, pipe); 6229 else if (IS_VALLEYVIEW(dev)) 6230 vlv_disable_pll(dev_priv, pipe); 6231 else 6232 i9xx_disable_pll(intel_crtc); 6233 } 6234 6235 if (!IS_GEN2(dev)) 6236 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6237 6238 intel_crtc->active = false; 6239 intel_update_watermarks(crtc); 6240 6241 mutex_lock(&dev->struct_mutex); 6242 intel_fbc_update(dev); 6243 mutex_unlock(&dev->struct_mutex); 6244 } 6245 6246 static void i9xx_crtc_off(struct drm_crtc *crtc) 6247 { 6248 } 6249 6250 /* Master function to enable/disable CRTC and corresponding power wells */ 6251 void intel_crtc_control(struct drm_crtc *crtc, bool 
enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			/* Grab all power domains the enabled crtc needs and
			 * remember them for the matching disable. */
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
			intel_crtc_enable_planes(crtc);
		}
	} else {
		if (intel_crtc->active) {
			intel_crtc_disable_planes(crtc);
			dev_priv->display.crtc_disable(crtc);

			/* Drop the references taken on enable. */
			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}

	update_state_fb(intel_crtc->base.primary);
}

/**
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	/* The crtc stays on as long as any of its encoders has an active
	 * connector. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);

	crtc->state->active = enable;
}

/* Fully shut down a crtc (planes, pipe, primary plane) and mark every
 * connector routed through it as DPMS off. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_crtc_disable_planes(crtc);
	dev_priv->display.crtc_disable(crtc);
	dev_priv->display.off(crtc);

	drm_plane_helper_disable(crtc->primary);

	/* Update computed state. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and its
 * internal consistency). */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		/* there is no real hw state for MST connectors */
		if (connector->mst_port)
			return;

		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
			"wrong connector dpms state\n");
		I915_STATE_WARN(connector->base.encoder != &encoder->base,
			"active connector not linked to encoder\n");

		if (encoder) {
			I915_STATE_WARN(!encoder->connectors_active,
				"encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
			if (I915_STATE_WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			I915_STATE_WARN(!crtc->state->enable,
					"crtc not enabled\n");
			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
				"encoder active on the wrong pipe\n");
		}
	}
}

/* Allocate the atomic state for a connector.  Returns 0 or -ENOMEM. */
int intel_connector_init(struct intel_connector *connector)
{
	struct drm_connector_state *connector_state;

	connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
	if (!connector_state)
		return -ENOMEM;

	connector->base.state = connector_state;
	return 0;
}

/* Allocate and initialize a connector; returns NULL on allocation failure. */
struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}
/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/* Number of FDI lanes this crtc state will consume, 0 if it has no PCH
 * encoder or is disabled. */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
{
	if (crtc_state->base.enable && crtc_state->has_pch_encoder)
		return crtc_state->fdi_lanes;

	return 0;
}

/* Validate the requested FDI lane count against per-platform limits and,
 * on Ivybridge 3-pipe parts, against the lanes shared with the other pipe.
 * Returns 0 on success, -EINVAL on an impossible config, or the error from
 * acquiring the other pipe's atomic state. */
static int ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_crtc *other_crtc;
	struct intel_crtc_state *other_crtc_state;

	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return -EINVAL;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return -EINVAL;
		} else {
			return 0;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return 0;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return 0;
	case PIPE_B:
		if (pipe_config->fdi_lanes <= 2)
			return 0;

		/* >2 lanes on B only works if pipe C uses no FDI lanes */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}
		return 0;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return -EINVAL;
		}

		/* C can only get lanes that B isn't using */
		other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
		other_crtc_state =
			intel_atomic_get_crtc_state(state, other_crtc);
		if (IS_ERR(other_crtc_state))
			return PTR_ERR(other_crtc_state);

		if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return -EINVAL;
		}
		return 0;
	default:
		BUG();
	}
}

#define RETRY 1
/* Compute FDI lane count and m/n dividers for a PCH-attached pipe, lowering
 * pipe_bpp (down to 18bpp) until the link has enough bandwidth.  Returns 0,
 * RETRY (caller must recompute with the reduced bpp), or a negative errno. */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock, ret;
	bool needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	ret = ironlake_check_fdi_lanes(intel_crtc->base.dev,
				       intel_crtc->pipe, pipe_config);
	if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
		/* Shave 2 bits per component and try again */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return ret;
}

/* IPS (intermediate pixel storage) is only possible at <= 24bpp and when the
 * platform/crtc supports it, gated by the i915.enable_ips modparam. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
		hsw_crtc_supports_ips(crtc) &&
		pipe_config->pipe_bpp <= 24;
}

/* Platform-independent crtc state computation: clock limits, double wide,
 * width parity constraints, IPS, FDI config and scaler setup. */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int ret;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
	    adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	/* FIXME: remove below call once atomic mode set is place and all crtc
	 * related checks called from atomic_crtc_check function */
	ret = 0;
	DRM_DEBUG_KMS("intel_crtc = %p drm_state (pipe_config->base.state) = %p\n",
		      crtc, pipe_config->base.state);
	ret = intel_atomic_setup_scalers(dev, crtc, pipe_config);

	return ret;
}

/* Read the current cdclk (in kHz) from the SKL CDCLK/DPLL0 registers. */
static int skylake_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	uint32_t linkrate;

	if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
		WARN(1, "LCPLL1 not enabled\n");
		return 24000; /* 24MHz is the cd freq with NSSC ref */
	}

	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
		return 540000;

	linkrate = (I915_READ(DPLL_CTRL1) &
		    DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;

	if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
	    linkrate == DPLL_CTRL1_LINK_RATE_1080) {
		/* vco 8640 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 432000;
		case CDCLK_FREQ_337_308:
			return 308570;
		case CDCLK_FREQ_675_617:
			return 617140;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	} else {
		/* vco 8100 */
		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
		case CDCLK_FREQ_450_432:
			return 450000;
		case CDCLK_FREQ_337_308:
			return 337500;
		case CDCLK_FREQ_675_617:
			return 675000;
		default:
			WARN(1, "Unknown cd freq selection\n");
		}
	}

	/* error case, do as if DPLL0 isn't enabled */
	return 24000;
}

/* Read the current cdclk (in kHz) from the BDW LCPLL/fuse registers. */
static int broadwell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_54O_BDW)
		return 540000;
	else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
		return 337500;
	else
		return 675000;
}

/* Read the current cdclk (in kHz) from the HSW LCPLL/fuse registers. */
static int haswell_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t lcpll = I915_READ(LCPLL_CTL);
	uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;

	if (lcpll & LCPLL_CD_SOURCE_FCLK)
		return 800000;
	else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
		return 450000;
	else if (freq == LCPLL_CLK_FREQ_450)
		return 450000;
	else if (IS_HSW_ULT(dev))
		return 337500;
	else
		return 540000;
}

/* Derive VLV cdclk (kHz) from the HPLL frequency and the CCK divider. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	int divider;

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}

/* Fixed cdclk values (kHz) for the older platforms below. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}

static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Decode the PNV display core clock from the GCFGC PCI config word. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 266667;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333333;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444444;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fallthrough - treat unknown values as 133 MHz */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133333;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 166667;
	}
}

/* Decode the 915GM display clock from the GCFGC PCI config word. */
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
		return 133333;
	else {
		switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
		case GC_DISPLAY_CLOCK_333_MHZ:
			return 333333;
		default:
		case GC_DISPLAY_CLOCK_190_200_MHZ:
			return 190000;
		}
	}
}

static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}

static int i855_get_display_clock_speed(struct drm_device *dev)
{
	/* NOTE(review): hpllcc is never read from hardware here, so the
	 * switch always sees 0 — confirm this matches the intended
	 * "assume high speed state" behavior. */
	u16 hpllcc = 0;
	/* Assume that the hardware is in the high speed state.  This
	 * should be the default.
	 */
	switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
	case GC_CLOCK_133_200:
	case GC_CLOCK_100_200:
		return 200000;
	case GC_CLOCK_166_250:
		return 250000;
	case GC_CLOCK_100_133:
		return 133333;
	}

	/* Shouldn't happen */
	return 0;
}

static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}

/* Halve num/den together until both fit in the M/N register fields. */
static void
intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
{
	while (*num > DATA_LINK_M_N_MASK ||
	       *den > DATA_LINK_M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

/* Compute an m/n register pair approximating the ratio m:n. */
static void compute_m_n(unsigned int m, unsigned int n,
			uint32_t *ret_m, uint32_t *ret_n)
{
	*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
	*ret_m = div_u64((uint64_t) m * *ret_n, n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}

/* Fill in the gmch and link m/n values for a display link (DP or FDI). */
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
		       int pixel_clock, int link_clock,
		       struct intel_link_m_n *m_n)
{
	m_n->tu = 64;

	compute_m_n(bits_per_pixel * pixel_clock,
		    link_clock * nlanes * 8,
		    &m_n->gmch_m, &m_n->gmch_n);

	compute_m_n(pixel_clock, link_clock,
		    &m_n->link_m, &m_n->link_n);
}

/* Whether to use spread-spectrum clocking for the panel: modparam override
 * first, then VBT setting unless quirked off. */
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&&
		!(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/* Pick the reference clock (kHz) for the i9xx-family DPLL, preferring the
 * VBT SSC frequency for a lone LVDS panel when SSC is enabled. */
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	WARN_ON(!crtc_state->base.state);

	if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
		refclk = 100000;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		   intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

/* Pack the FP register value from n/m2 (Pineview layout). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

/* Pack the FP register value from n/m1/m2 (classic i9xx layout). */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

/* Compute fp0/fp1 for the crtc state; fp1 only gets the reduced clock for
 * LVDS (lowfreq_avail tracks whether downclocking is possible). */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	} else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

/* Recalibrate the PLL B opamp via DPIO (magic sequence from the VBIOS
 * notes; see vlv_prepare_pll). */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/* NOTE(review): plain assignment discards the masked value read
	 * above — looks like it may have been meant as |=; confirm against
	 * the DPIO programming notes before changing. */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program the PCH transcoder data/link M/N registers for this pipe. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/* Program the CPU transcoder M/N (and, where available, M2/N2 for DRRS)
 * registers; pre-gen5 uses the G4X per-pipe register layout. */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/* Select and program the DP M/N divider set (M1_N1 or M2_N2) for the crtc,
 * routing to the PCH or CPU transcoder as appropriate. */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}

/* Compute (but don't write) the VLV DPLL/DPLL_MD register values into the
 * crtc state. */
static void vlv_update_pll(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input. We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
7065 */ 7066 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV | 7067 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV; 7068 /* We should never disable this, set it here for state tracking */ 7069 if (crtc->pipe == PIPE_B) 7070 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7071 dpll |= DPLL_VCO_ENABLE; 7072 pipe_config->dpll_hw_state.dpll = dpll; 7073 7074 dpll_md = (pipe_config->pixel_multiplier - 1) 7075 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 7076 pipe_config->dpll_hw_state.dpll_md = dpll_md; 7077 } 7078 7079 static void vlv_prepare_pll(struct intel_crtc *crtc, 7080 const struct intel_crtc_state *pipe_config) 7081 { 7082 struct drm_device *dev = crtc->base.dev; 7083 struct drm_i915_private *dev_priv = dev->dev_private; 7084 int pipe = crtc->pipe; 7085 u32 mdiv; 7086 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7087 u32 coreclk, reg_val; 7088 7089 mutex_lock(&dev_priv->sb_lock); 7090 7091 bestn = pipe_config->dpll.n; 7092 bestm1 = pipe_config->dpll.m1; 7093 bestm2 = pipe_config->dpll.m2; 7094 bestp1 = pipe_config->dpll.p1; 7095 bestp2 = pipe_config->dpll.p2; 7096 7097 /* See eDP HDMI DPIO driver vbios notes doc */ 7098 7099 /* PLL B needs special handling */ 7100 if (pipe == PIPE_B) 7101 vlv_pllb_recal_opamp(dev_priv, pipe); 7102 7103 /* Set up Tx target for periodic Rcomp update */ 7104 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f); 7105 7106 /* Disable target IRef on PLL */ 7107 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe)); 7108 reg_val &= 0x00ffffff; 7109 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val); 7110 7111 /* Disable fast lock */ 7112 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610); 7113 7114 /* Set idtafcrecal before PLL is enabled */ 7115 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK)); 7116 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT)); 7117 mdiv |= ((bestn << DPIO_N_SHIFT)); 7118 mdiv |= (1 << DPIO_K_SHIFT); 7119 7120 /* 7121 * Post divider depends on pixel clock rate, DAC vs 
digital (and LVDS, 7122 * but we don't support that). 7123 * Note: don't use the DAC post divider as it seems unstable. 7124 */ 7125 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT); 7126 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7127 7128 mdiv |= DPIO_ENABLE_CALIBRATION; 7129 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 7130 7131 /* Set HBR and RBR LPF coefficients */ 7132 if (pipe_config->port_clock == 162000 || 7133 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || 7134 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 7135 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7136 0x009f0003); 7137 else 7138 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7139 0x00d0000f); 7140 7141 if (pipe_config->has_dp_encoder) { 7142 /* Use SSC source */ 7143 if (pipe == PIPE_A) 7144 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7145 0x0df40000); 7146 else 7147 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7148 0x0df70000); 7149 } else { /* HDMI or VGA */ 7150 /* Use bend source */ 7151 if (pipe == PIPE_A) 7152 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7153 0x0df70000); 7154 else 7155 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7156 0x0df40000); 7157 } 7158 7159 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7160 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7161 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 7162 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 7163 coreclk |= 0x01000000; 7164 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7165 7166 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000); 7167 mutex_unlock(&dev_priv->sb_lock); 7168 } 7169 7170 static void chv_update_pll(struct intel_crtc *crtc, 7171 struct intel_crtc_state *pipe_config) 7172 { 7173 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | 7174 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | 7175 DPLL_VCO_ENABLE; 7176 if (crtc->pipe != PIPE_A) 7177 pipe_config->dpll_hw_state.dpll |= 
		DPLL_INTEGRATED_CRI_CLK_VLV;

	pipe_config->dpll_hw_state.dpll_md =
		(pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}

/*
 * Program the CHV PLL dividers and analog tuning parameters through the
 * sideband (DPIO) interface.  The DPLL register is written with VCO enable
 * masked off, so the PLL itself stays disabled here; the actual enable is
 * done elsewhere (see chv_enable_pll callers).
 *
 * Expects pipe_config->dpll to hold the pre-computed divider values
 * (n, m1, m2, p1, p2, vco).
 */
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	int dpll_reg = DPLL(crtc->pipe);
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 loopfilter, tribuf_calcntr;
	u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
	u32 dpio_val;
	int vco;

	/* m2 is split: low 22 bits fractional part, upper bits integer part */
	bestn = pipe_config->dpll.n;
	bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2 >> 22;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;
	vco = pipe_config->dpll.vco;
	dpio_val = 0;
	loopfilter = 0;

	/*
	 * Enable Refclk and SSC
	 */
	I915_WRITE(dpll_reg,
		   pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);

	/* All DPIO accesses below go through the sideband and need the lock */
	mutex_lock(&dev_priv->sb_lock);

	/* p1 and p2 divider */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
		       5 << DPIO_CHV_S1_DIV_SHIFT |
		       bestp1 << DPIO_CHV_P1_DIV_SHIFT |
		       bestp2 << DPIO_CHV_P2_DIV_SHIFT |
		       1 << DPIO_CHV_K_DIV_SHIFT);

	/* Feedback post-divider - m2 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);

	/* Feedback refclk divider - n and m1 */
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
		       DPIO_CHV_M1_DIV_BY_2 |
		       1 << DPIO_CHV_N_DIV_SHIFT);

	/* M2 fraction division */
	if (bestm2_frac)
		vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);

	/* M2 fraction division enable */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
		      DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* coarse threshold select only when no fractional divider is in use */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter: coefficients are selected by VCO frequency band */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
		       vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
		       DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->sb_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	/* Minimal on-stack pipe config: only the dpll-related fields and the
	 * crtc back-pointer are consumed by the update/prepare/enable calls. */
	struct intel_crtc_state pipe_config = {
		.base.crtc = &crtc->base,
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_update_pll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe.
 * To be used in cases where we need
 * the PLL disabled even when @pipe is not going to be enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for gen3/4
 * style PLLs and stash them in crtc_state->dpll_hw_state.  Nothing is
 * written to hardware here.
 *
 * @reduced_clock: optional divisors for LVDS downclocking; only consumed
 *                 on G4X below.
 * @num_connectors: used to decide whether the SSC reference may be used
 *                  (only with a single connector on the pipe).
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    struct intel_crtc_state *crtc_state,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	/* SDVO and HDMI share the same high-speed clocking mode */
	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* Reference clock selection: TV clock, spread-spectrum for a lone
	 * LVDS panel with SSC enabled, or the default reference otherwise. */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}

/*
 * Gen2 counterpart of i9xx_update_pll(): compute the DPLL register value
 * and store it in crtc_state->dpll_hw_state.dpll.  No hardware access.
 */
static void i8xx_update_pll(struct intel_crtc *crtc,
			    struct intel_crtc_state *crtc_state,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	} else {
		if (clock->p1 == 2)
			dpll |= PLL_P1_DIVIDE_BY_TWO;
		else
			dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (clock->p2 == 4)
			dpll |= PLL_P2_DIVIDE_BY_4;
	}

	/* I830 is filtered out: see the DPLL_DVO_2X_MODE handling in
	 * i9xx_get_pipe_config() for why 830 is special here. */
	if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
		dpll |= DPLL_DVO_2X_MODE;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;
}

static void intel_set_pipe_timings(struct intel_crtc
*intel_crtc) 7452 { 7453 struct drm_device *dev = intel_crtc->base.dev; 7454 struct drm_i915_private *dev_priv = dev->dev_private; 7455 enum i915_pipe pipe = intel_crtc->pipe; 7456 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7457 struct drm_display_mode *adjusted_mode = 7458 &intel_crtc->config->base.adjusted_mode; 7459 uint32_t crtc_vtotal, crtc_vblank_end; 7460 int vsyncshift = 0; 7461 7462 /* We need to be careful not to changed the adjusted mode, for otherwise 7463 * the hw state checker will get angry at the mismatch. */ 7464 crtc_vtotal = adjusted_mode->crtc_vtotal; 7465 crtc_vblank_end = adjusted_mode->crtc_vblank_end; 7466 7467 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { 7468 /* the chip adds 2 halflines automatically */ 7469 crtc_vtotal -= 1; 7470 crtc_vblank_end -= 1; 7471 7472 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 7473 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7474 else 7475 vsyncshift = adjusted_mode->crtc_hsync_start - 7476 adjusted_mode->crtc_htotal / 2; 7477 if (vsyncshift < 0) 7478 vsyncshift += adjusted_mode->crtc_htotal; 7479 } 7480 7481 if (INTEL_INFO(dev)->gen > 3) 7482 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift); 7483 7484 I915_WRITE(HTOTAL(cpu_transcoder), 7485 (adjusted_mode->crtc_hdisplay - 1) | 7486 ((adjusted_mode->crtc_htotal - 1) << 16)); 7487 I915_WRITE(HBLANK(cpu_transcoder), 7488 (adjusted_mode->crtc_hblank_start - 1) | 7489 ((adjusted_mode->crtc_hblank_end - 1) << 16)); 7490 I915_WRITE(HSYNC(cpu_transcoder), 7491 (adjusted_mode->crtc_hsync_start - 1) | 7492 ((adjusted_mode->crtc_hsync_end - 1) << 16)); 7493 7494 I915_WRITE(VTOTAL(cpu_transcoder), 7495 (adjusted_mode->crtc_vdisplay - 1) | 7496 ((crtc_vtotal - 1) << 16)); 7497 I915_WRITE(VBLANK(cpu_transcoder), 7498 (adjusted_mode->crtc_vblank_start - 1) | 7499 ((crtc_vblank_end - 1) << 16)); 7500 I915_WRITE(VSYNC(cpu_transcoder), 7501 (adjusted_mode->crtc_vsync_start - 1) | 7502 ((adjusted_mode->crtc_vsync_end - 1) 
<< 16)); 7503 7504 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be 7505 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is 7506 * documented on the DDI_FUNC_CTL register description, EDP Input Select 7507 * bits. */ 7508 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP && 7509 (pipe == PIPE_B || pipe == PIPE_C)) 7510 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder))); 7511 7512 /* pipesrc controls the size that is scaled from, which should 7513 * always be the user's requested size. 7514 */ 7515 I915_WRITE(PIPESRC(pipe), 7516 ((intel_crtc->config->pipe_src_w - 1) << 16) | 7517 (intel_crtc->config->pipe_src_h - 1)); 7518 } 7519 7520 static void intel_get_pipe_timings(struct intel_crtc *crtc, 7521 struct intel_crtc_state *pipe_config) 7522 { 7523 struct drm_device *dev = crtc->base.dev; 7524 struct drm_i915_private *dev_priv = dev->dev_private; 7525 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 7526 uint32_t tmp; 7527 7528 tmp = I915_READ(HTOTAL(cpu_transcoder)); 7529 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1; 7530 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1; 7531 tmp = I915_READ(HBLANK(cpu_transcoder)); 7532 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1; 7533 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1; 7534 tmp = I915_READ(HSYNC(cpu_transcoder)); 7535 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1; 7536 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1; 7537 7538 tmp = I915_READ(VTOTAL(cpu_transcoder)); 7539 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1; 7540 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1; 7541 tmp = I915_READ(VBLANK(cpu_transcoder)); 7542 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1; 7543 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 
16) & 0xffff) + 1; 7544 tmp = I915_READ(VSYNC(cpu_transcoder)); 7545 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; 7546 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; 7547 7548 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { 7549 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; 7550 pipe_config->base.adjusted_mode.crtc_vtotal += 1; 7551 pipe_config->base.adjusted_mode.crtc_vblank_end += 1; 7552 } 7553 7554 tmp = I915_READ(PIPESRC(crtc->pipe)); 7555 pipe_config->pipe_src_h = (tmp & 0xffff) + 1; 7556 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; 7557 7558 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h; 7559 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w; 7560 } 7561 7562 void intel_mode_from_pipe_config(struct drm_display_mode *mode, 7563 struct intel_crtc_state *pipe_config) 7564 { 7565 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay; 7566 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal; 7567 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start; 7568 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end; 7569 7570 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay; 7571 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal; 7572 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start; 7573 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end; 7574 7575 mode->flags = pipe_config->base.adjusted_mode.flags; 7576 7577 mode->clock = pipe_config->base.adjusted_mode.crtc_clock; 7578 mode->flags |= pipe_config->base.adjusted_mode.flags; 7579 } 7580 7581 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7582 { 7583 struct drm_device *dev = intel_crtc->base.dev; 7584 struct drm_i915_private *dev_priv = dev->dev_private; 7585 uint32_t pipeconf; 7586 7587 pipeconf = 0; 7588 7589 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) || 
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/*
 * Compute the PLL state (divisors + register values) for a gmch platform
 * crtc and store it in crtc_state.  Returns 0 on success or -EINVAL when
 * no divisor set can hit the requested port clock.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	/* Scan the atomic state for connectors feeding this crtc to find out
	 * which output types are present and how many connectors there are. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* No PLL state is computed for DSI outputs here */
	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE. The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock. If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc_state,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* Dispatch to the platform-specific DPLL state builder */
	if (IS_GEN2(dev)) {
		i8xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(crtc, crtc_state);
	} else {
		i9xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	}

	return 0;
}

/*
 * Read back the gmch panel fitter state into pipe_config, but only if the
 * fitter is enabled and attached to this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe.
	 */
	if (INTEL_INFO(dev)->gen < 4) {
		/* pre-gen4: only pipe B can drive the panel fitter */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

/*
 * Read the VLV DPLL divider word back via the DPIO sideband and compute
 * the port clock for the state readout path.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->sb_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->sb_lock);

	/* Unpack the divisor fields out of the single DW3 register */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Reconstruct the framebuffer the BIOS/firmware left enabled on this
 * primary plane, so the boot image can be inherited.  Bails out silently
 * if the plane is disabled or the fb struct can't be allocated; on
 * success fills plane_config (base, size, tiling, fb).
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* gen4+ uses the surface register (tiled) or linear offset; older
	 * hardware has a single plane address register. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * CHV counterpart of vlv_crtc_clock_get(): read the divider words back
 * via DPIO and compute the port clock for state readout.
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
	int refclk = 100000;
	mutex_lock(&dev_priv->sb_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
	mutex_unlock(&dev_priv->sb_lock);

	/* Reassemble m2: integer part in DW0, fractional part (when enabled
	 * by DW3) in DW2 -- mirrors the split done in chv_prepare_pll(). */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = (pll_dw0 & 0xff) << 22;
	if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
		clock.m2 |= pll_dw2 & 0x3fffff;
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	chv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read back the full hardware state of a gmch pipe into pipe_config.
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled, true when pipe_config was filled.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	/* On gmch platforms pipe and transcoder are always 1:1 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}

/*
 * Configure the PCH display reference clock (DREF) on IBX/CPT according
 * to which panel outputs exist and whether SSC should be used.  Sources
 * are switched one at a time with settle delays, per the comment below.
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Hardware already matches the desired state; nothing to do */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* The staged writes above must have converged on the precomputed
	 * final value; anything else is a programming error. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY reset bit in SOUTH_CHICKEN2 (assert, then de-assert),
 * waiting up to 100us for the status bit to follow each edge.
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw */
/* Magic register offsets/values below come from the workaround above;
 * they are programmed pairwise (0x2xxx / 0x21xx) for the two channels. */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21)
| (1 << 18); 8183 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY); 8184 8185 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY); 8186 tmp |= (1 << 24) | (1 << 21) | (1 << 18); 8187 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY); 8188 8189 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY); 8190 tmp &= ~(7 << 13); 8191 tmp |= (5 << 13); 8192 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY); 8193 8194 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY); 8195 tmp &= ~(7 << 13); 8196 tmp |= (5 << 13); 8197 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY); 8198 8199 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY); 8200 tmp &= ~0xFF; 8201 tmp |= 0x1C; 8202 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY); 8203 8204 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY); 8205 tmp &= ~0xFF; 8206 tmp |= 0x1C; 8207 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY); 8208 8209 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY); 8210 tmp &= ~(0xFF << 16); 8211 tmp |= (0x1C << 16); 8212 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY); 8213 8214 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY); 8215 tmp &= ~(0xFF << 16); 8216 tmp |= (0x1C << 16); 8217 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY); 8218 8219 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY); 8220 tmp |= (1 << 27); 8221 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY); 8222 8223 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY); 8224 tmp |= (1 << 27); 8225 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY); 8226 8227 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY); 8228 tmp &= ~(0xF << 28); 8229 tmp |= (4 << 28); 8230 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY); 8231 8232 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY); 8233 tmp &= ~(0xF << 28); 8234 tmp |= (4 << 28); 8235 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY); 8236 } 8237 8238 /* Implements 3 different sequences from BSpec chapter "Display iCLK 8239 * Programming" based on the parameters passed: 8240 * - Sequence to enable CLKOUT_DP 8241 * - Sequence to 
enable CLKOUT_DP without spread
 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
 *
 * All SBI (sideband) accesses below are serialized by sb_lock.
 */
static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
				 bool with_fdi)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	/* FDI always needs downspread; LP PCH has no FDI at all. */
	if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
		with_spread = true;
	if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
	    with_fdi, "LP PCH doesn't have FDI\n"))
		with_fdi = false;

	mutex_lock(&dev_priv->sb_lock);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	tmp &= ~SBI_SSCCTL_DISABLE;
	tmp |= SBI_SSCCTL_PATHALT;
	intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

	udelay(24);

	if (with_spread) {
		tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
		tmp &= ~SBI_SSCCTL_PATHALT;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);

		if (with_fdi) {
			lpt_reset_fdi_mphy(dev_priv);
			lpt_program_fdi_mphy(dev_priv);
		}
	}

	/* LP PCH uses SBI_GEN0; non-LP uses SBI_DBUFF0 for the same bit. */
	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}

/* Sequence to disable CLKOUT_DP */
static void lpt_disable_clkout_dp(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg, tmp;

	mutex_lock(&dev_priv->sb_lock);

	reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Put the path in alt mode before disabling the SSC. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->sb_lock);
}

/*
 * LPT refclk init: enable CLKOUT_DP with spread and FDI programming when
 * an analog (VGA) encoder is present, otherwise disable CLKOUT_DP.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga)
		lpt_enable_clkout_dp(dev, true, true);
	else
		lpt_disable_clkout_dp(dev);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Pick the reference clock for an ILK-class pipe: the VBT-provided LVDS
 * SSC frequency when a lone LVDS output uses SSC, otherwise 120 MHz
 * (returned in kHz).
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch
(encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/*
 * Program PIPECONF for an ILK-style pipe from the staged crtc config:
 * bpc, dithering, interlace mode and limited color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		/* Post-offset implements the 16/255 black level lift. */
		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/*
 * Program PIPECONF for the HSW+ transcoder (dither/interlace), select the
 * 8-bit gamma mode, and on BDW/gen9+ also program PIPEMISC with the pipe
 * bpc and dithering settings.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

/*
 * Find DPLL divisors for the target port clock (and, for LVDS with
 * downclocking available, a second reduced clock).  Returns false when no
 * divisor set can hit the target clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	refclk = ironlake_get_refclk(crtc_state);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc_state, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc_state,
					  crtc_state->port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc_state,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}

int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True when the PLL's M value is small enough to want feedback cb tuning. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the DPLL control register value for an ILK-class PCH PLL from the
 * staged crtc state, and set FP_CB_TUNE in *fp (and *fp2, if a reduced
 * clock is in use) when autotuning applies.  Returns the DPLL value with
 * VCO enable set; the caller writes it to dpll_hw_state.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Compute clock state for an ILK-class crtc: find divisors, fill in the
 * staged dpll_hw_state, and (for PCH-attached outputs) reserve a shared
 * PCH PLL.  Returns 0 on success or -EINVAL on failure.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear.
 */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}

/*
 * Read back the link/data M/N values (and TU size) from the PCH
 * transcoder registers for hardware state readout.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/*
 * Read back the CPU transcoder's M1/N1 values; on gen5-7 with DRRS also
 * read the alternate M2/N2 set into m2_n2 (may be NULL).  Pre-gen5 uses
 * the per-pipe G4X register layout instead.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/*
 * DP M/N readout: PCH-attached encoders use the PCH transcoder
 * registers, everything else the CPU transcoder (including M2/N2).
 */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI M/N configuration from the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc,
				     pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/*
 * Find the scaler (if any) currently bound to this pipe's output (i.e.
 * not assigned to a plane) and record it in the scaler state as the
 * pfit scaler.  id stays -1 when no pipe scaler is enabled.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}

/*
 * Read back the BIOS-programmed primary plane state on SKL+ and build an
 * intel_framebuffer describing it (format, tiling, size, stride) so the
 * boot framebuffer can be inherited.  Leaves plane_config->fb NULL when
 * the plane is disabled or uses an unknown tiling mode.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* NOTE(review): frees via fb (== &intel_fb->base); assumes base is
	 * the first member of struct intel_framebuffer — confirm. */
	kfree(fb);
}

/*
 * Read back the ILK panel fitter state for this pipe.  On gen7 also
 * sanity-check that the fitter is wired to the expected pipe.
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Read back the BIOS-programmed primary plane state on ILK-class hardware
 * and build an intel_framebuffer describing it (format, tiling, size,
 * stride) so the boot framebuffer can be inherited.  Bails out silently
 * when the plane is disabled.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read back the full pipe configuration for an ILK-class crtc: bpc,
 * color range, PCH transcoder/FDI state, shared DPLL selection, pixel
 * multiplier, pipe timings and panel fitter.  Returns false when the
 * pipe's power domain is off or the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if
(HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX PLLs are fixed 1:1 to the pipe. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}

/*
 * State-check that everything which must be off before LCPLL can be
 * disabled really is off: all crtcs, the power well, SPLL/WRPLLs, panel
 * power, PWMs, the utility pin, PCH GTC and interrupts.  Each violation
 * produces an I915_STATE_WARN rather than aborting.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* D_COMP lives in a different register on HSW vs BDW. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/* On HSW, D_COMP is written via the pcode mailbox; on BDW via MMIO. */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		/* Move CDclk to Fclk before shutting the PLL down. */
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
				       LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}

/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Nothing to do if LCPLL is already fully up. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state.
We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

/* Undo hsw_enable_pc8(): restore LCPLL, refclk and clock gating. */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

/* Reprogram BXT cdclk when the required max pixel clock changed. */
static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, NULL);
	int req_cdclk;

	/* see the comment in valleyview_modeset_global_resources */
	if (WARN_ON(max_pixclk < 0))
		return;

	req_cdclk = broxton_calc_cdclk(dev_priv, max_pixclk);

	if (req_cdclk != dev_priv->cdclk_freq)
		broxton_set_cdclk(dev, req_cdclk);
}

/* On HSW+ the DDI PLL selection does all the clock computation work. */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}

/* BXT: each DDI port has a fixed PLL assignment; just record it. */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
	}
}

/* SKL: read the DPLL driving this port out of DPLL_CTRL2. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

/* HSW/BDW: the per-port clock select register names the WRPLL, if any. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
pipe_config->shared_dpll = DPLL_ID_WRPLL1; 9440 break; 9441 case PORT_CLK_SEL_WRPLL2: 9442 pipe_config->shared_dpll = DPLL_ID_WRPLL2; 9443 break; 9444 } 9445 } 9446 9447 static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 9448 struct intel_crtc_state *pipe_config) 9449 { 9450 struct drm_device *dev = crtc->base.dev; 9451 struct drm_i915_private *dev_priv = dev->dev_private; 9452 struct intel_shared_dpll *pll; 9453 enum port port; 9454 uint32_t tmp; 9455 9456 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder)); 9457 9458 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 9459 9460 if (IS_SKYLAKE(dev)) 9461 skylake_get_ddi_pll(dev_priv, port, pipe_config); 9462 else if (IS_BROXTON(dev)) 9463 bxt_get_ddi_pll(dev_priv, port, pipe_config); 9464 else 9465 haswell_get_ddi_pll(dev_priv, port, pipe_config); 9466 9467 if (pipe_config->shared_dpll >= 0) { 9468 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 9469 9470 WARN_ON(!pll->get_hw_state(dev_priv, pll, 9471 &pipe_config->dpll_hw_state)); 9472 } 9473 9474 /* 9475 * Haswell has only FDI/PCH transcoder A. It is which is connected to 9476 * DDI E. So just check whether this pipe is wired to DDI E and whether 9477 * the PCH transcoder is on. 
9478 */ 9479 if (INTEL_INFO(dev)->gen < 9 && 9480 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 9481 pipe_config->has_pch_encoder = true; 9482 9483 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 9484 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> 9485 FDI_DP_PORT_WIDTH_SHIFT) + 1; 9486 9487 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9488 } 9489 } 9490 9491 static bool haswell_get_pipe_config(struct intel_crtc *crtc, 9492 struct intel_crtc_state *pipe_config) 9493 { 9494 struct drm_device *dev = crtc->base.dev; 9495 struct drm_i915_private *dev_priv = dev->dev_private; 9496 enum intel_display_power_domain pfit_domain; 9497 uint32_t tmp; 9498 9499 if (!intel_display_power_is_enabled(dev_priv, 9500 POWER_DOMAIN_PIPE(crtc->pipe))) 9501 return false; 9502 9503 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9504 pipe_config->shared_dpll = DPLL_ID_PRIVATE; 9505 9506 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); 9507 if (tmp & TRANS_DDI_FUNC_ENABLE) { 9508 enum i915_pipe trans_edp_pipe; 9509 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) { 9510 default: 9511 WARN(1, "unknown pipe linked to edp transcoder\n"); 9512 case TRANS_DDI_EDP_INPUT_A_ONOFF: 9513 case TRANS_DDI_EDP_INPUT_A_ON: 9514 trans_edp_pipe = PIPE_A; 9515 break; 9516 case TRANS_DDI_EDP_INPUT_B_ONOFF: 9517 trans_edp_pipe = PIPE_B; 9518 break; 9519 case TRANS_DDI_EDP_INPUT_C_ONOFF: 9520 trans_edp_pipe = PIPE_C; 9521 break; 9522 } 9523 9524 if (trans_edp_pipe == crtc->pipe) 9525 pipe_config->cpu_transcoder = TRANSCODER_EDP; 9526 } 9527 9528 if (!intel_display_power_is_enabled(dev_priv, 9529 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 9530 return false; 9531 9532 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); 9533 if (!(tmp & PIPECONF_ENABLE)) 9534 return false; 9535 9536 haswell_get_ddi_port_state(crtc, pipe_config); 9537 9538 intel_get_pipe_timings(crtc, pipe_config); 9539 9540 if (INTEL_INFO(dev)->gen >= 9) { 9541 skl_init_scalers(dev, crtc, pipe_config); 
9542 } 9543 9544 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9545 9546 if (INTEL_INFO(dev)->gen >= 9) { 9547 pipe_config->scaler_state.scaler_id = -1; 9548 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9549 } 9550 9551 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) { 9552 if (INTEL_INFO(dev)->gen == 9) 9553 skylake_get_pfit_config(crtc, pipe_config); 9554 else if (INTEL_INFO(dev)->gen < 9) 9555 ironlake_get_pfit_config(crtc, pipe_config); 9556 else 9557 MISSING_CASE(INTEL_INFO(dev)->gen); 9558 } 9559 9560 if (IS_HASWELL(dev)) 9561 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 9562 (I915_READ(IPS_CTL) & IPS_ENABLE); 9563 9564 if (pipe_config->cpu_transcoder != TRANSCODER_EDP) { 9565 pipe_config->pixel_multiplier = 9566 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1; 9567 } else { 9568 pipe_config->pixel_multiplier = 1; 9569 } 9570 9571 return true; 9572 } 9573 9574 static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 9575 { 9576 struct drm_device *dev = crtc->dev; 9577 struct drm_i915_private *dev_priv = dev->dev_private; 9578 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9579 uint32_t cntl = 0, size = 0; 9580 9581 if (base) { 9582 unsigned int width = intel_crtc->base.cursor->state->crtc_w; 9583 unsigned int height = intel_crtc->base.cursor->state->crtc_h; 9584 unsigned int stride = roundup_pow_of_two(width) * 4; 9585 9586 switch (stride) { 9587 default: 9588 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n", 9589 width, stride); 9590 stride = 256; 9591 /* fallthrough */ 9592 case 256: 9593 case 512: 9594 case 1024: 9595 case 2048: 9596 break; 9597 } 9598 9599 cntl |= CURSOR_ENABLE | 9600 CURSOR_GAMMA_ENABLE | 9601 CURSOR_FORMAT_ARGB | 9602 CURSOR_STRIDE(stride); 9603 9604 size = (height << 12) | width; 9605 } 9606 9607 if (intel_crtc->cursor_cntl != 0 && 9608 (intel_crtc->cursor_base != base || 9609 intel_crtc->cursor_size != size || 9610 intel_crtc->cursor_cntl != 
cntl)) { 9611 /* On these chipsets we can only modify the base/size/stride 9612 * whilst the cursor is disabled. 9613 */ 9614 I915_WRITE(_CURACNTR, 0); 9615 POSTING_READ(_CURACNTR); 9616 intel_crtc->cursor_cntl = 0; 9617 } 9618 9619 if (intel_crtc->cursor_base != base) { 9620 I915_WRITE(_CURABASE, base); 9621 intel_crtc->cursor_base = base; 9622 } 9623 9624 if (intel_crtc->cursor_size != size) { 9625 I915_WRITE(CURSIZE, size); 9626 intel_crtc->cursor_size = size; 9627 } 9628 9629 if (intel_crtc->cursor_cntl != cntl) { 9630 I915_WRITE(_CURACNTR, cntl); 9631 POSTING_READ(_CURACNTR); 9632 intel_crtc->cursor_cntl = cntl; 9633 } 9634 } 9635 9636 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) 9637 { 9638 struct drm_device *dev = crtc->dev; 9639 struct drm_i915_private *dev_priv = dev->dev_private; 9640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9641 int pipe = intel_crtc->pipe; 9642 uint32_t cntl; 9643 9644 cntl = 0; 9645 if (base) { 9646 cntl = MCURSOR_GAMMA_ENABLE; 9647 switch (intel_crtc->base.cursor->state->crtc_w) { 9648 case 64: 9649 cntl |= CURSOR_MODE_64_ARGB_AX; 9650 break; 9651 case 128: 9652 cntl |= CURSOR_MODE_128_ARGB_AX; 9653 break; 9654 case 256: 9655 cntl |= CURSOR_MODE_256_ARGB_AX; 9656 break; 9657 default: 9658 MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); 9659 return; 9660 } 9661 cntl |= pipe << 28; /* Connect to correct pipe */ 9662 9663 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 9664 cntl |= CURSOR_PIPE_CSC_ENABLE; 9665 } 9666 9667 if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) 9668 cntl |= CURSOR_ROTATE_180; 9669 9670 if (intel_crtc->cursor_cntl != cntl) { 9671 I915_WRITE(CURCNTR(pipe), cntl); 9672 POSTING_READ(CURCNTR(pipe)); 9673 intel_crtc->cursor_cntl = cntl; 9674 } 9675 9676 /* and commit changes on next vblank */ 9677 I915_WRITE(CURBASE(pipe), base); 9678 POSTING_READ(CURBASE(pipe)); 9679 9680 intel_crtc->cursor_base = base; 9681 } 9682 9683 /* If no-part of the cursor is visible on the framebuffer, 
then the GPU may hang... */ 9684 static void intel_crtc_update_cursor(struct drm_crtc *crtc, 9685 bool on) 9686 { 9687 struct drm_device *dev = crtc->dev; 9688 struct drm_i915_private *dev_priv = dev->dev_private; 9689 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9690 int pipe = intel_crtc->pipe; 9691 int x = crtc->cursor_x; 9692 int y = crtc->cursor_y; 9693 u32 base = 0, pos = 0; 9694 9695 if (on) 9696 base = intel_crtc->cursor_addr; 9697 9698 if (x >= intel_crtc->config->pipe_src_w) 9699 base = 0; 9700 9701 if (y >= intel_crtc->config->pipe_src_h) 9702 base = 0; 9703 9704 if (x < 0) { 9705 if (x + intel_crtc->base.cursor->state->crtc_w <= 0) 9706 base = 0; 9707 9708 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; 9709 x = -x; 9710 } 9711 pos |= x << CURSOR_X_SHIFT; 9712 9713 if (y < 0) { 9714 if (y + intel_crtc->base.cursor->state->crtc_h <= 0) 9715 base = 0; 9716 9717 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; 9718 y = -y; 9719 } 9720 pos |= y << CURSOR_Y_SHIFT; 9721 9722 if (base == 0 && intel_crtc->cursor_base == 0) 9723 return; 9724 9725 I915_WRITE(CURPOS(pipe), pos); 9726 9727 /* ILK+ do this automagically */ 9728 if (HAS_GMCH_DISPLAY(dev) && 9729 crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { 9730 base += (intel_crtc->base.cursor->state->crtc_h * 9731 intel_crtc->base.cursor->state->crtc_w - 1) * 4; 9732 } 9733 9734 if (IS_845G(dev) || IS_I865G(dev)) 9735 i845_update_cursor(crtc, base); 9736 else 9737 i9xx_update_cursor(crtc, base); 9738 } 9739 9740 static bool cursor_size_ok(struct drm_device *dev, 9741 uint32_t width, uint32_t height) 9742 { 9743 if (width == 0 || height == 0) 9744 return false; 9745 9746 /* 9747 * 845g/865g are special in that they are only limited by 9748 * the width of their cursors, the height is arbitrary up to 9749 * the precision of the register. Everything else requires 9750 * square cursors, limited to a few power-of-two sizes. 
9751 */ 9752 if (IS_845G(dev) || IS_I865G(dev)) { 9753 if ((width & 63) != 0) 9754 return false; 9755 9756 if (width > (IS_845G(dev) ? 64 : 512)) 9757 return false; 9758 9759 if (height > 1023) 9760 return false; 9761 } else { 9762 switch (width | height) { 9763 case 256: 9764 case 128: 9765 if (IS_GEN2(dev)) 9766 return false; 9767 case 64: 9768 break; 9769 default: 9770 return false; 9771 } 9772 } 9773 9774 return true; 9775 } 9776 9777 static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 9778 u16 *blue, uint32_t start, uint32_t size) 9779 { 9780 int end = (start + size > 256) ? 256 : start + size, i; 9781 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9782 9783 for (i = start; i < end; i++) { 9784 intel_crtc->lut_r[i] = red[i] >> 8; 9785 intel_crtc->lut_g[i] = green[i] >> 8; 9786 intel_crtc->lut_b[i] = blue[i] >> 8; 9787 } 9788 9789 intel_crtc_load_lut(crtc); 9790 } 9791 9792 /* VESA 640x480x72Hz mode to set on the pipe */ 9793 static struct drm_display_mode load_detect_mode = { 9794 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, 9795 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), 9796 }; 9797 9798 struct drm_framebuffer * 9799 __intel_framebuffer_create(struct drm_device *dev, 9800 struct drm_mode_fb_cmd2 *mode_cmd, 9801 struct drm_i915_gem_object *obj) 9802 { 9803 struct intel_framebuffer *intel_fb; 9804 int ret; 9805 9806 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 9807 if (!intel_fb) { 9808 drm_gem_object_unreference(&obj->base); 9809 return ERR_PTR(-ENOMEM); 9810 } 9811 9812 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); 9813 if (ret) 9814 goto err; 9815 9816 return &intel_fb->base; 9817 err: 9818 drm_gem_object_unreference(&obj->base); 9819 kfree(intel_fb); 9820 9821 return ERR_PTR(ret); 9822 } 9823 9824 static struct drm_framebuffer * 9825 intel_framebuffer_create(struct drm_device *dev, 9826 struct drm_mode_fb_cmd2 *mode_cmd, 9827 struct drm_i915_gem_object 
*obj) 9828 { 9829 struct drm_framebuffer *fb; 9830 int ret; 9831 9832 ret = i915_mutex_lock_interruptible(dev); 9833 if (ret) 9834 return ERR_PTR(ret); 9835 fb = __intel_framebuffer_create(dev, mode_cmd, obj); 9836 mutex_unlock(&dev->struct_mutex); 9837 9838 return fb; 9839 } 9840 9841 static u32 9842 intel_framebuffer_pitch_for_width(int width, int bpp) 9843 { 9844 u32 pitch = DIV_ROUND_UP(width * bpp, 8); 9845 return ALIGN(pitch, 64); 9846 } 9847 9848 static u32 9849 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) 9850 { 9851 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); 9852 return PAGE_ALIGN(pitch * mode->vdisplay); 9853 } 9854 9855 static struct drm_framebuffer * 9856 intel_framebuffer_create_for_mode(struct drm_device *dev, 9857 struct drm_display_mode *mode, 9858 int depth, int bpp) 9859 { 9860 struct drm_i915_gem_object *obj; 9861 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 9862 9863 obj = i915_gem_alloc_object(dev, 9864 intel_framebuffer_size_for_mode(mode, bpp)); 9865 if (obj == NULL) 9866 return ERR_PTR(-ENOMEM); 9867 9868 mode_cmd.width = mode->hdisplay; 9869 mode_cmd.height = mode->vdisplay; 9870 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width, 9871 bpp); 9872 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth); 9873 9874 return intel_framebuffer_create(dev, &mode_cmd, obj); 9875 } 9876 9877 static struct drm_framebuffer * 9878 mode_fits_in_fbdev(struct drm_device *dev, 9879 struct drm_display_mode *mode) 9880 { 9881 #ifdef CONFIG_DRM_I915_FBDEV 9882 struct drm_i915_private *dev_priv = dev->dev_private; 9883 struct drm_i915_gem_object *obj; 9884 struct drm_framebuffer *fb; 9885 9886 if (!dev_priv->fbdev) 9887 return NULL; 9888 9889 if (!dev_priv->fbdev->fb) 9890 return NULL; 9891 9892 obj = dev_priv->fbdev->fb->obj; 9893 BUG_ON(!obj); 9894 9895 fb = &dev_priv->fbdev->fb->base; 9896 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, 9897 fb->bits_per_pixel)) 
9898 return NULL; 9899 9900 if (obj->base.size < mode->vdisplay * fb->pitches[0]) 9901 return NULL; 9902 9903 return fb; 9904 #else 9905 return NULL; 9906 #endif 9907 } 9908 9909 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state, 9910 struct drm_crtc *crtc, 9911 struct drm_display_mode *mode, 9912 struct drm_framebuffer *fb, 9913 int x, int y) 9914 { 9915 struct drm_plane_state *plane_state; 9916 int hdisplay, vdisplay; 9917 int ret; 9918 9919 plane_state = drm_atomic_get_plane_state(state, crtc->primary); 9920 if (IS_ERR(plane_state)) 9921 return PTR_ERR(plane_state); 9922 9923 if (mode) 9924 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); 9925 else 9926 hdisplay = vdisplay = 0; 9927 9928 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL); 9929 if (ret) 9930 return ret; 9931 drm_atomic_set_fb_for_plane(plane_state, fb); 9932 plane_state->crtc_x = 0; 9933 plane_state->crtc_y = 0; 9934 plane_state->crtc_w = hdisplay; 9935 plane_state->crtc_h = vdisplay; 9936 plane_state->src_x = x << 16; 9937 plane_state->src_y = y << 16; 9938 plane_state->src_w = hdisplay << 16; 9939 plane_state->src_h = vdisplay << 16; 9940 9941 return 0; 9942 } 9943 9944 bool intel_get_load_detect_pipe(struct drm_connector *connector, 9945 struct drm_display_mode *mode, 9946 struct intel_load_detect_pipe *old, 9947 struct drm_modeset_acquire_ctx *ctx) 9948 { 9949 struct intel_crtc *intel_crtc; 9950 struct intel_encoder *intel_encoder = 9951 intel_attached_encoder(connector); 9952 struct drm_crtc *possible_crtc; 9953 struct drm_encoder *encoder = &intel_encoder->base; 9954 struct drm_crtc *crtc = NULL; 9955 struct drm_device *dev = encoder->dev; 9956 struct drm_framebuffer *fb; 9957 struct drm_mode_config *config = &dev->mode_config; 9958 struct drm_atomic_state *state = NULL; 9959 struct drm_connector_state *connector_state; 9960 struct intel_crtc_state *crtc_state; 9961 int ret, i = -1; 9962 9963 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 
9964 connector->base.id, connector->name, 9965 encoder->base.id, encoder->name); 9966 9967 retry: 9968 ret = drm_modeset_lock(&config->connection_mutex, ctx); 9969 if (ret) 9970 goto fail_unlock; 9971 9972 /* 9973 * Algorithm gets a little messy: 9974 * 9975 * - if the connector already has an assigned crtc, use it (but make 9976 * sure it's on first) 9977 * 9978 * - try to find the first unused crtc that can drive this connector, 9979 * and use that if we find one 9980 */ 9981 9982 /* See if we already have a CRTC for this connector */ 9983 if (encoder->crtc) { 9984 crtc = encoder->crtc; 9985 9986 ret = drm_modeset_lock(&crtc->mutex, ctx); 9987 if (ret) 9988 goto fail_unlock; 9989 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 9990 if (ret) 9991 goto fail_unlock; 9992 9993 old->dpms_mode = connector->dpms; 9994 old->load_detect_temp = false; 9995 9996 /* Make sure the crtc and connector are running */ 9997 if (connector->dpms != DRM_MODE_DPMS_ON) 9998 connector->funcs->dpms(connector, DRM_MODE_DPMS_ON); 9999 10000 return true; 10001 } 10002 10003 /* Find an unused one (if possible) */ 10004 for_each_crtc(dev, possible_crtc) { 10005 i++; 10006 if (!(encoder->possible_crtcs & (1 << i))) 10007 continue; 10008 if (possible_crtc->state->enable) 10009 continue; 10010 /* This can occur when applying the pipe A quirk on resume. */ 10011 if (to_intel_crtc(possible_crtc)->new_enabled) 10012 continue; 10013 10014 crtc = possible_crtc; 10015 break; 10016 } 10017 10018 /* 10019 * If we didn't find an unused CRTC, don't use any. 
10020 */ 10021 if (!crtc) { 10022 DRM_DEBUG_KMS("no pipe available for load-detect\n"); 10023 goto fail_unlock; 10024 } 10025 10026 ret = drm_modeset_lock(&crtc->mutex, ctx); 10027 if (ret) 10028 goto fail_unlock; 10029 ret = drm_modeset_lock(&crtc->primary->mutex, ctx); 10030 if (ret) 10031 goto fail_unlock; 10032 intel_encoder->new_crtc = to_intel_crtc(crtc); 10033 to_intel_connector(connector)->new_encoder = intel_encoder; 10034 10035 intel_crtc = to_intel_crtc(crtc); 10036 intel_crtc->new_enabled = true; 10037 old->dpms_mode = connector->dpms; 10038 old->load_detect_temp = true; 10039 old->release_fb = NULL; 10040 10041 state = drm_atomic_state_alloc(dev); 10042 if (!state) 10043 return false; 10044 10045 state->acquire_ctx = ctx; 10046 10047 connector_state = drm_atomic_get_connector_state(state, connector); 10048 if (IS_ERR(connector_state)) { 10049 ret = PTR_ERR(connector_state); 10050 goto fail; 10051 } 10052 10053 connector_state->crtc = crtc; 10054 connector_state->best_encoder = &intel_encoder->base; 10055 10056 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10057 if (IS_ERR(crtc_state)) { 10058 ret = PTR_ERR(crtc_state); 10059 goto fail; 10060 } 10061 10062 crtc_state->base.active = crtc_state->base.enable = true; 10063 10064 if (!mode) 10065 mode = &load_detect_mode; 10066 10067 /* We need a framebuffer large enough to accommodate all accesses 10068 * that the plane may generate whilst we perform load detection. 10069 * We can not rely on the fbcon either being present (we get called 10070 * during its initialisation to detect all boot displays, or it may 10071 * not even exist) or that it is large enough to satisfy the 10072 * requested mode. 
10073 */ 10074 fb = mode_fits_in_fbdev(dev, mode); 10075 if (fb == NULL) { 10076 DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); 10077 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); 10078 old->release_fb = fb; 10079 } else 10080 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); 10081 if (IS_ERR(fb)) { 10082 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); 10083 goto fail; 10084 } 10085 10086 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0); 10087 if (ret) 10088 goto fail; 10089 10090 drm_mode_copy(&crtc_state->base.mode, mode); 10091 10092 if (intel_set_mode(crtc, state, true)) { 10093 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); 10094 if (old->release_fb) 10095 old->release_fb->funcs->destroy(old->release_fb); 10096 goto fail; 10097 } 10098 crtc->primary->crtc = crtc; 10099 10100 /* let the connector get through one full cycle before testing */ 10101 intel_wait_for_vblank(dev, intel_crtc->pipe); 10102 return true; 10103 10104 fail: 10105 intel_crtc->new_enabled = crtc->state->enable; 10106 fail_unlock: 10107 drm_atomic_state_free(state); 10108 state = NULL; 10109 10110 if (ret == -EDEADLK) { 10111 drm_modeset_backoff(ctx); 10112 goto retry; 10113 } 10114 10115 return false; 10116 } 10117 10118 void intel_release_load_detect_pipe(struct drm_connector *connector, 10119 struct intel_load_detect_pipe *old, 10120 struct drm_modeset_acquire_ctx *ctx) 10121 { 10122 struct drm_device *dev = connector->dev; 10123 struct intel_encoder *intel_encoder = 10124 intel_attached_encoder(connector); 10125 struct drm_encoder *encoder = &intel_encoder->base; 10126 struct drm_crtc *crtc = encoder->crtc; 10127 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10128 struct drm_atomic_state *state; 10129 struct drm_connector_state *connector_state; 10130 struct intel_crtc_state *crtc_state; 10131 int ret; 10132 10133 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", 10134 connector->base.id, 
connector->name, 10135 encoder->base.id, encoder->name); 10136 10137 if (old->load_detect_temp) { 10138 state = drm_atomic_state_alloc(dev); 10139 if (!state) 10140 goto fail; 10141 10142 state->acquire_ctx = ctx; 10143 10144 connector_state = drm_atomic_get_connector_state(state, connector); 10145 if (IS_ERR(connector_state)) 10146 goto fail; 10147 10148 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc); 10149 if (IS_ERR(crtc_state)) 10150 goto fail; 10151 10152 to_intel_connector(connector)->new_encoder = NULL; 10153 intel_encoder->new_crtc = NULL; 10154 intel_crtc->new_enabled = false; 10155 10156 connector_state->best_encoder = NULL; 10157 connector_state->crtc = NULL; 10158 10159 crtc_state->base.enable = crtc_state->base.active = false; 10160 10161 ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL, 10162 0, 0); 10163 if (ret) 10164 goto fail; 10165 10166 ret = intel_set_mode(crtc, state, true); 10167 if (ret) 10168 goto fail; 10169 10170 if (old->release_fb) { 10171 drm_framebuffer_unregister_private(old->release_fb); 10172 drm_framebuffer_unreference(old->release_fb); 10173 } 10174 10175 return; 10176 } 10177 10178 /* Switch crtc and encoder back off if necessary */ 10179 if (old->dpms_mode != DRM_MODE_DPMS_ON) 10180 connector->funcs->dpms(connector, old->dpms_mode); 10181 10182 return; 10183 fail: 10184 DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); 10185 drm_atomic_state_free(state); 10186 } 10187 10188 static int i9xx_pll_refclk(struct drm_device *dev, 10189 const struct intel_crtc_state *pipe_config) 10190 { 10191 struct drm_i915_private *dev_priv = dev->dev_private; 10192 u32 dpll = pipe_config->dpll_hw_state.dpll; 10193 10194 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10195 return dev_priv->vbt.lvds_ssc_freq; 10196 else if (HAS_PCH_SPLIT(dev)) 10197 return 120000; 10198 else if (!IS_GEN2(dev)) 10199 return 96000; 10200 else 10201 return 48000; 10202 } 10203 10204 /* Returns the clock of the 
currently programmed mode of the given pipe. */ 10205 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 10206 struct intel_crtc_state *pipe_config) 10207 { 10208 struct drm_device *dev = crtc->base.dev; 10209 struct drm_i915_private *dev_priv = dev->dev_private; 10210 int pipe = pipe_config->cpu_transcoder; 10211 u32 dpll = pipe_config->dpll_hw_state.dpll; 10212 u32 fp; 10213 intel_clock_t clock; 10214 int refclk = i9xx_pll_refclk(dev, pipe_config); 10215 10216 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) 10217 fp = pipe_config->dpll_hw_state.fp0; 10218 else 10219 fp = pipe_config->dpll_hw_state.fp1; 10220 10221 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; 10222 if (IS_PINEVIEW(dev)) { 10223 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; 10224 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; 10225 } else { 10226 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; 10227 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; 10228 } 10229 10230 if (!IS_GEN2(dev)) { 10231 if (IS_PINEVIEW(dev)) 10232 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> 10233 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); 10234 else 10235 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> 10236 DPLL_FPA01_P1_POST_DIV_SHIFT); 10237 10238 switch (dpll & DPLL_MODE_MASK) { 10239 case DPLLB_MODE_DAC_SERIAL: 10240 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? 10241 5 : 10; 10242 break; 10243 case DPLLB_MODE_LVDS: 10244 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? 10245 7 : 14; 10246 break; 10247 default: 10248 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " 10249 "mode\n", (int)(dpll & DPLL_MODE_MASK)); 10250 return; 10251 } 10252 10253 if (IS_PINEVIEW(dev)) 10254 pineview_clock(refclk, &clock); 10255 else 10256 i9xx_clock(refclk, &clock); 10257 } else { 10258 u32 lvds = IS_I830(dev) ? 
0 : I915_READ(LVDS); 10259 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN); 10260 10261 if (is_lvds) { 10262 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> 10263 DPLL_FPA01_P1_POST_DIV_SHIFT); 10264 10265 if (lvds & LVDS_CLKB_POWER_UP) 10266 clock.p2 = 7; 10267 else 10268 clock.p2 = 14; 10269 } else { 10270 if (dpll & PLL_P1_DIVIDE_BY_TWO) 10271 clock.p1 = 2; 10272 else { 10273 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> 10274 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; 10275 } 10276 if (dpll & PLL_P2_DIVIDE_BY_4) 10277 clock.p2 = 4; 10278 else 10279 clock.p2 = 2; 10280 } 10281 10282 i9xx_clock(refclk, &clock); 10283 } 10284 10285 /* 10286 * This value includes pixel_multiplier. We will use 10287 * port_clock to compute adjusted_mode.crtc_clock in the 10288 * encoder's get_config() function. 10289 */ 10290 pipe_config->port_clock = clock.dot; 10291 } 10292 10293 int intel_dotclock_calculate(int link_freq, 10294 const struct intel_link_m_n *m_n) 10295 { 10296 /* 10297 * The calculation for the data clock is: 10298 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp 10299 * But we want to avoid losing precison if possible, so: 10300 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp)) 10301 * 10302 * and the link clock is simpler: 10303 * link_clock = (m * link_clock) / n 10304 */ 10305 10306 if (!m_n->link_n) 10307 return 0; 10308 10309 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n); 10310 } 10311 10312 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 10313 struct intel_crtc_state *pipe_config) 10314 { 10315 struct drm_device *dev = crtc->base.dev; 10316 10317 /* read out port_clock from the DPLL */ 10318 i9xx_crtc_clock_get(crtc, pipe_config); 10319 10320 /* 10321 * This value does not include pixel_multiplier. 10322 * We will check that port_clock and adjusted_mode.crtc_clock 10323 * agree once we know their relationship in the encoder's 10324 * get_config() function. 
10325 */ 10326 pipe_config->base.adjusted_mode.crtc_clock = 10327 intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000, 10328 &pipe_config->fdi_m_n); 10329 } 10330 10331 /** Returns the currently programmed mode of the given pipe. */ 10332 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 10333 struct drm_crtc *crtc) 10334 { 10335 struct drm_i915_private *dev_priv = dev->dev_private; 10336 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10337 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10338 struct drm_display_mode *mode; 10339 struct intel_crtc_state pipe_config; 10340 int htot = I915_READ(HTOTAL(cpu_transcoder)); 10341 int hsync = I915_READ(HSYNC(cpu_transcoder)); 10342 int vtot = I915_READ(VTOTAL(cpu_transcoder)); 10343 int vsync = I915_READ(VSYNC(cpu_transcoder)); 10344 enum i915_pipe pipe = intel_crtc->pipe; 10345 10346 mode = kzalloc(sizeof(*mode), GFP_KERNEL); 10347 if (!mode) 10348 return NULL; 10349 10350 /* 10351 * Construct a pipe_config sufficient for getting the clock info 10352 * back out of crtc_clock_get. 10353 * 10354 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need 10355 * to use a real value here instead. 
10356 */ 10357 pipe_config.cpu_transcoder = (enum transcoder) pipe; 10358 pipe_config.pixel_multiplier = 1; 10359 pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe)); 10360 pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe)); 10361 pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe)); 10362 i9xx_crtc_clock_get(intel_crtc, &pipe_config); 10363 10364 mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier; 10365 mode->hdisplay = (htot & 0xffff) + 1; 10366 mode->htotal = ((htot & 0xffff0000) >> 16) + 1; 10367 mode->hsync_start = (hsync & 0xffff) + 1; 10368 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; 10369 mode->vdisplay = (vtot & 0xffff) + 1; 10370 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; 10371 mode->vsync_start = (vsync & 0xffff) + 1; 10372 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; 10373 10374 drm_mode_set_name(mode); 10375 10376 return mode; 10377 } 10378 10379 static void intel_decrease_pllclock(struct drm_crtc *crtc) 10380 { 10381 struct drm_device *dev = crtc->dev; 10382 struct drm_i915_private *dev_priv = dev->dev_private; 10383 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10384 10385 if (!HAS_GMCH_DISPLAY(dev)) 10386 return; 10387 10388 if (!dev_priv->lvds_downclock_avail) 10389 return; 10390 10391 /* 10392 * Since this is called by a timer, we should never get here in 10393 * the manual case. 
10394 */ 10395 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { 10396 int pipe = intel_crtc->pipe; 10397 int dpll_reg = DPLL(pipe); 10398 int dpll; 10399 10400 DRM_DEBUG_DRIVER("downclocking LVDS\n"); 10401 10402 assert_panel_unlocked(dev_priv, pipe); 10403 10404 dpll = I915_READ(dpll_reg); 10405 dpll |= DISPLAY_RATE_SELECT_FPA1; 10406 I915_WRITE(dpll_reg, dpll); 10407 intel_wait_for_vblank(dev, pipe); 10408 dpll = I915_READ(dpll_reg); 10409 if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) 10410 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); 10411 } 10412 10413 } 10414 10415 void intel_mark_busy(struct drm_device *dev) 10416 { 10417 struct drm_i915_private *dev_priv = dev->dev_private; 10418 10419 if (dev_priv->mm.busy) 10420 return; 10421 10422 intel_runtime_pm_get(dev_priv); 10423 i915_update_gfx_val(dev_priv); 10424 if (INTEL_INFO(dev)->gen >= 6) 10425 gen6_rps_busy(dev_priv); 10426 dev_priv->mm.busy = true; 10427 } 10428 10429 void intel_mark_idle(struct drm_device *dev) 10430 { 10431 struct drm_i915_private *dev_priv = dev->dev_private; 10432 struct drm_crtc *crtc; 10433 10434 if (!dev_priv->mm.busy) 10435 return; 10436 10437 dev_priv->mm.busy = false; 10438 10439 for_each_crtc(dev, crtc) { 10440 if (!crtc->primary->fb) 10441 continue; 10442 10443 intel_decrease_pllclock(crtc); 10444 } 10445 10446 if (INTEL_INFO(dev)->gen >= 6) 10447 gen6_rps_idle(dev->dev_private); 10448 10449 intel_runtime_pm_put(dev_priv); 10450 } 10451 10452 static void intel_crtc_destroy(struct drm_crtc *crtc) 10453 { 10454 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10455 struct drm_device *dev = crtc->dev; 10456 struct intel_unpin_work *work; 10457 10458 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10459 work = intel_crtc->unpin_work; 10460 intel_crtc->unpin_work = NULL; 10461 lockmgr(&dev->event_lock, LK_RELEASE); 10462 10463 if (work) { 10464 cancel_work_sync(&work->work); 10465 kfree(work); 10466 } 10467 10468 drm_crtc_cleanup(crtc); 10469 10470 kfree(intel_crtc); 10471 } 10472 
/*
 * intel_unpin_work_fn - deferred page-flip cleanup
 *
 * Runs from the driver workqueue after a flip completes: unpins the old
 * framebuffer, drops the GEM/framebuffer references taken when the flip
 * was queued, refreshes FBC, signals frontbuffer flip completion and
 * finally releases the work item itself.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	intel_fbc_update(dev);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
	drm_framebuffer_unreference(work->old_fb);

	/* unpin_work_count was bumped when the flip was queued */
	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

/*
 * do_intel_finish_page_flip - complete a pending page flip on @crtc
 *
 * Common helper for the pipe- and plane-indexed entry points below.
 * Only completes the flip once it has reached INTEL_FLIP_COMPLETE.
 */
static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ... */
	smp_rmb();

	if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	page_flip_completed(intel_crtc);

	lockmgr(&dev->event_lock, LK_RELEASE);
}

/* Complete the pending flip on the CRTC mapped to @pipe. */
void intel_finish_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

	do_intel_finish_page_flip(dev, crtc);
}

/* Complete the pending flip on the CRTC mapped to @plane. */
void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];

	do_intel_finish_page_flip(dev, crtc);
}

/* Is 'a' after or equal to 'b'? Wraparound-safe u32 comparison. */
static bool g4x_flip_count_after_eq(u32 a, u32 b)
{
	return !((a - b) & 0x80000000);
}

/*
 * page_flip_finished - has the queued flip actually landed in hardware?
 *
 * Returns true when the flip can be considered done: always after a GPU
 * reset (to unblock lost flips), always on pre-g4x gmch platforms that
 * lack the needed registers, otherwise by comparing the live surface
 * base address and the pipe flip counter against the queued values.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)),
					crtc->unpin_work->flip_count);
}

/*
 * intel_prepare_page_flip - advance a pending flip towards completion
 *
 * Called from the flip-pending interrupt path; bumps work->pending from
 * PENDING to COMPLETE (atomic_inc_not_zero, so an inactive work item is
 * left untouched) once the hardware reports the flip finished.
 */
void intel_prepare_page_flip(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);


	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/* Publish work->pending = INTEL_FLIP_PENDING with the barriers needed
 * so the irq handler observes a fully initialised work item. */
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_wmb();
	atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
	/* and that it is marked active as soon as the irq could fire.
 */
	smp_wmb();
}

/*
 * intel_gen2_queue_flip - emit a CS page flip for gen2
 *
 * Emits a wait for any previous flip on this plane followed by
 * MI_DISPLAY_FLIP.  The dword order and count (6) are fixed by the
 * hardware command format; do not reorder the emits.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, 0); /* aux display base address, unused */

	/* must be marked active before the commands can execute */
	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * intel_gen3_queue_flip - emit a CS page flip for gen3
 *
 * Same shape as gen2 but uses the i915 flip opcode and a trailing NOOP
 * instead of the aux base dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, MI_NOOP);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * intel_gen4_queue_flip - emit a CS page flip for gen4 (i965+)
 *
 * i965+ only needs the new base address (tiling mode is OR'ed into the
 * low bits); pitch and offsets come from the display registers.
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0]);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * intel_gen6_queue_flip - emit a CS page flip for gen6
 *
 * Like gen4 but the tiling mode is OR'ed into the pitch dword instead
 * of the base-address dword.
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(ring, pf | pipesrc);

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * intel_gen7_queue_flip - emit a CS page flip for gen7/gen8 (IVB layout)
 *
 * On RCS additionally unmasks the flip-done event in DERRMR via
 * LRI + SRM (the SRM grows a dword on gen8 for 48-bit addresses).
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (ring->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(ring);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message.
	 * Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (ring->id == RCS) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					DERRMR_PIPEB_PRI_FLIP_DONE |
					DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit(ring, DERRMR);
		intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(ring, 0);
			intel_ring_emit(ring, MI_NOOP);
		}
	}

	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(ring, (MI_NOOP));

	intel_mark_page_flip_active(intel_crtc);
	__intel_ring_advance(ring);
	return 0;
}

/*
 * use_mmio_flip - decide between MMIO and CS flips
 *
 * Module parameter i915.use_mmio_flip forces the choice (<0 never,
 * >0 always); otherwise MMIO is used with execlists or when the object
 * was last written by a different ring, avoiding a CS-side wait.
 */
static bool use_mmio_flip(struct intel_engine_cs *ring,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (ring == NULL)
		return true;

	if (INTEL_INFO(ring->dev)->gen < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
	else
		return ring != i915_gem_request_get_ring(obj->last_write_req);
}

/*
 * skl_do_mmio_flip - write the new surface via MMIO on SKL+
 *
 * Reprograms the tiling bits in PLANE_CTL and the stride, then latches
 * everything with the PLANE_SURF write (which is the atomic trigger).
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum i915_pipe pipe = intel_crtc->pipe;
	u32 ctl, stride;

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	stride = fb->pitches[0] /
		 intel_fb_stride_alignment(dev, fb->modifier[0],
					   fb->pixel_format);

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}

/*
 * ilk_do_mmio_flip - write the new surface via MMIO on ilk..bdw
 *
 * Updates the tiled bit in DSPCNTR and latches the new base address
 * with the DSPSURF write.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	u32 dspcntr;
	u32 reg;

	reg = DSPCNTR(intel_crtc->plane);
	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane),
		   intel_crtc->unpin_work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));

}

/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	bool atomic_update;
	u32 start_vbl_count;

	intel_mark_page_flip_active(intel_crtc);

	/* bracket the register writes so they land within one vblank */
	atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);

	if (INTEL_INFO(dev)->gen >= 9)
		skl_do_mmio_flip(intel_crtc);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(intel_crtc);

	if (atomic_update)
		intel_pipe_update_end(intel_crtc, start_vbl_count);
}

/*
 * intel_mmio_flip_work_func - worker that performs a queued MMIO flip
 *
 * Waits for the last write request on the object (if any) to complete,
 * performs the flip, then drops the request reference and frees the
 * flip descriptor allocated by intel_queue_mmio_flip().
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);

	if (mmio_flip->req)
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    mmio_flip->crtc->reset_counter,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));

	intel_do_mmio_flip(mmio_flip->crtc);

	i915_gem_request_unreference__unlocked(mmio_flip->req);
	kfree(mmio_flip);
}

/*
 * intel_queue_mmio_flip - schedule an MMIO flip on the system workqueue
 *
 * Allocates the flip descriptor, takes a reference on the object's last
 * write request (so the worker can wait on it) and hands everything to
 * intel_mmio_flip_work_func().  fb/ring/flags are unused here; the
 * signature matches the CS queue_flip hooks.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct intel_engine_cs *ring,
				 uint32_t flags)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), M_DRM, M_WAITOK);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}

/* Fallback queue_flip hook for platforms without CS flip support. */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct intel_engine_cs *ring,
				    uint32_t flags)
{
	return -ENODEV;
}

/*
 * __intel_pageflip_stall_check - detect a flip whose irq was missed
 *
 * Caller must hold dev->event_lock.  Returns true when the pending flip
 * appears to have completed in hardware (pending state reached
 * COMPLETE, or the scanout address already matches the queued offset
 * after at least 3 vblanks of readiness).
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;

	if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
		return true;

	if (!work->enable_stall_check)
		return false;

	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}

/*
 * intel_check_page_flip - vblank-time watchdog for stuck page flips
 *
 * Kicks (completes) a flip whose completion interrupt was lost, and
 * RPS-boosts the flip's request if it is more than one vblank late.
 */
void intel_check_page_flip(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* NOTE(review): upstream asserts irq context here; disabled in
	 * this port. */
//	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
	lockmgr(&dev->event_lock, LK_RELEASE);
}

/*
 * intel_crtc_page_flip - queue a page flip (drm_crtc_funcs.page_flip)
 *
 * Validates that only the base address changes, installs the unpin work
 * item, pins the new framebuffer, and queues either an MMIO or CS flip.
 * Returns 0 on success or a negative errno; -EBUSY if a flip is already
 * pending.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc
		*intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum i915_pipe pipe = intel_crtc->pipe;
	struct intel_unpin_work *work;
	struct intel_engine_cs *ring;
	bool mmio_flip;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* GPU is wedged beyond recovery: fake a completed flip */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting unpin_work */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	if (intel_crtc->unpin_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (__intel_pageflip_stall_check(dev, crtc)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			lockmgr(&dev->event_lock, LK_RELEASE);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->unpin_work = work;
	lockmgr(&dev->event_lock, LK_RELEASE);

	/* throttle: let the unpin workers catch up before queueing more */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	/* Keep state structure in sync */
	if (crtc->primary->state->fb)
		drm_framebuffer_unreference(crtc->primary->state->fb);
	crtc->primary->state->fb = fb;
	if (crtc->primary->state->fb)
		drm_framebuffer_reference(crtc->primary->state->fb);

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	atomic_inc(&intel_crtc->unpin_work_count);
	intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1;

	/* pick the ring to emit the CS flip on (NULL forces MMIO) */
	if (IS_VALLEYVIEW(dev)) {
		ring = &dev_priv->ring[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			ring = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		ring = &dev_priv->ring[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		ring = i915_gem_request_get_ring(obj->last_write_req);
		if (ring == NULL || ring->id != RCS)
			ring = &dev_priv->ring[BCS];
	} else {
		ring = &dev_priv->ring[RCS];
	}

	mmio_flip = use_mmio_flip(ring, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	ret = intel_pin_and_fence_fb_obj(crtc->primary, fb,
					 crtc->primary->state,
					 mmio_flip ? i915_gem_request_get_ring(obj->last_write_req) : ring);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj)
						  + intel_crtc->dspaddr_offset;

	if (mmio_flip) {
		ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring,
					    page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);
	} else {
		if (obj->last_write_req) {
			ret = i915_gem_check_olr(obj->last_write_req);
			if (ret)
				goto cleanup_unpin;
		}

		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		i915_gem_request_assign(&work->flip_queued_req,
					intel_ring_get_request(ring));
	}

	work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
	work->enable_stall_check = true;

	i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
			  INTEL_FRONTBUFFER_PRIMARY(pipe));

	intel_fbc_disable(dev);
	intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
	mutex_unlock(&dev->struct_mutex);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

	/* error unwind: release in reverse order of acquisition */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state);
cleanup_pending:
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
out_hang:
		/* GPU hung: restore the plane and deliver the event so
		 * userspace doesn't wait forever */
		ret = intel_plane_restore(primary);
		if (ret == 0 && event) {
			lockmgr(&dev->event_lock, LK_EXCLUSIVE);
			drm_send_vblank_event(dev, pipe, event);
			lockmgr(&dev->event_lock, LK_RELEASE);
		}
	}
	return ret;
}

static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.load_lut = intel_crtc_load_lut,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
};

/**
 * intel_modeset_update_staged_output_state
 *
 * Updates the staged output configuration state, e.g. after we've read out the
 * current hw state.
 */
static void intel_modeset_update_staged_output_state(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		connector->new_encoder =
			to_intel_encoder(connector->base.encoder);
	}

	for_each_intel_encoder(dev, encoder) {
		encoder->new_crtc =
			to_intel_crtc(encoder->base.crtc);
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->new_enabled = crtc->base.state->enable;
	}
}

/* Transitional helper to copy current connector/encoder state to
 * connector->state. This is needed so that code that is partially
 * converted to atomic does the right thing.
11337 */ 11338 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 11339 { 11340 struct intel_connector *connector; 11341 11342 for_each_intel_connector(dev, connector) { 11343 if (connector->base.encoder) { 11344 connector->base.state->best_encoder = 11345 connector->base.encoder; 11346 connector->base.state->crtc = 11347 connector->base.encoder->crtc; 11348 } else { 11349 connector->base.state->best_encoder = NULL; 11350 connector->base.state->crtc = NULL; 11351 } 11352 } 11353 } 11354 11355 /* Fixup legacy state after an atomic state swap. 11356 */ 11357 static void intel_modeset_fixup_state(struct drm_atomic_state *state) 11358 { 11359 struct intel_crtc *crtc; 11360 struct intel_encoder *encoder; 11361 struct intel_connector *connector; 11362 11363 for_each_intel_connector(state->dev, connector) { 11364 connector->base.encoder = connector->base.state->best_encoder; 11365 if (connector->base.encoder) 11366 connector->base.encoder->crtc = 11367 connector->base.state->crtc; 11368 } 11369 11370 /* Update crtc of disabled encoders */ 11371 for_each_intel_encoder(state->dev, encoder) { 11372 int num_connectors = 0; 11373 11374 for_each_intel_connector(state->dev, connector) 11375 if (connector->base.encoder == &encoder->base) 11376 num_connectors++; 11377 11378 if (num_connectors == 0) 11379 encoder->base.crtc = NULL; 11380 } 11381 11382 for_each_intel_crtc(state->dev, crtc) { 11383 crtc->base.enabled = crtc->base.state->enable; 11384 crtc->config = to_intel_crtc_state(crtc->base.state); 11385 } 11386 } 11387 11388 static void 11389 connected_sink_compute_bpp(struct intel_connector *connector, 11390 struct intel_crtc_state *pipe_config) 11391 { 11392 int bpp = pipe_config->pipe_bpp; 11393 11394 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", 11395 connector->base.base.id, 11396 connector->base.name); 11397 11398 /* Don't use an invalid EDID bpc value */ 11399 if (connector->base.display_info.bpc && 11400 
connector->base.display_info.bpc * 3 < bpp) { 11401 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 11402 bpp, connector->base.display_info.bpc*3); 11403 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 11404 } 11405 11406 /* Clamp bpp to 8 on screens without EDID 1.4 */ 11407 if (connector->base.display_info.bpc == 0 && bpp > 24) { 11408 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 11409 bpp); 11410 pipe_config->pipe_bpp = 24; 11411 } 11412 } 11413 11414 static int 11415 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 11416 struct intel_crtc_state *pipe_config) 11417 { 11418 struct drm_device *dev = crtc->base.dev; 11419 struct drm_atomic_state *state; 11420 struct drm_connector *connector; 11421 struct drm_connector_state *connector_state; 11422 int bpp, i; 11423 11424 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev))) 11425 bpp = 10*3; 11426 else if (INTEL_INFO(dev)->gen >= 5) 11427 bpp = 12*3; 11428 else 11429 bpp = 8*3; 11430 11431 11432 pipe_config->pipe_bpp = bpp; 11433 11434 state = pipe_config->base.state; 11435 11436 /* Clamp display bpp to EDID value */ 11437 for_each_connector_in_state(state, connector, connector_state, i) { 11438 if (connector_state->crtc != &crtc->base) 11439 continue; 11440 11441 connected_sink_compute_bpp(to_intel_connector(connector), 11442 pipe_config); 11443 } 11444 11445 return bpp; 11446 } 11447 11448 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 11449 { 11450 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 11451 "type: 0x%x flags: 0x%x\n", 11452 mode->crtc_clock, 11453 mode->crtc_hdisplay, mode->crtc_hsync_start, 11454 mode->crtc_hsync_end, mode->crtc_htotal, 11455 mode->crtc_vdisplay, mode->crtc_vsync_start, 11456 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 11457 } 11458 11459 static void intel_dump_pipe_config(struct intel_crtc *crtc, 11460 struct intel_crtc_state *pipe_config, 11461 const char *context) 
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* second m/n set (m2/n2), used for the downclocked link rate */
	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* the DPLL hw state layout differs per platform generation */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, "
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		/* src coordinates are 16.16 fixed point, dst are integers */
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}

static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/* Check that every encoder already assigned to @crtc in @state can be
 * cloned with @encoder. */
static bool check_single_encoder_cloning(struct drm_atomic_state *state,
					 struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct intel_encoder
			*source_encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		source_encoder =
			to_intel_encoder(connector_state->best_encoder);
		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/* Verify that all encoders assigned to @crtc are mutually cloneable. */
static bool check_encoder_cloning(struct drm_atomic_state *state,
				  struct intel_crtc *crtc)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int i;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (!check_single_encoder_cloning(state, crtc, encoder))
			return false;
	}

	return true;
}

/* Reject configurations that would drive the same digital port from more
 * than one encoder at once; returns false on conflict. */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	unsigned int used_ports = 0;
	int i;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
		unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough: on DDI, UNKNOWN is a digital port too */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fallthrough */
		default:
			break;
		}
	}

	return true;
}

/* Reset @crtc_state to zero while preserving the fields that must
 * survive a modeset recompute (base state, scaler, shared DPLL info). */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	enum intel_dpll_id shared_dpll;
	uint32_t ddi_pll_sel;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved.
	 */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;

	memset(crtc_state, 0, sizeof *crtc_state);

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
}

/* Compute a full pipe configuration for @crtc from the atomic @state,
 * letting every encoder on the crtc adjust (or reject) the mode.
 * Returns 0 on success or a negative error code. */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_atomic_state *state,
			  struct intel_crtc_state *pipe_config)
{
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* allow exactly one bandwidth-constrained retry */

	if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels.
	 */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return 0;
fail:
	return ret;
}

/* True if any encoder currently drives @crtc (legacy pointers). */
static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			return true;

	return false;
}

/* Does this crtc state change require a full modeset? */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return state->mode_changed || state->active_changed;
}

/* Commit the atomic @state: swap it in and fix up the legacy
 * encoder/connector bookkeeping (connectors_active, dpms property). */
static void
intel_modeset_update_state(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	struct drm_connector *connector;
	int i;

	intel_shared_dpll_commit(dev_priv);

	/* Encoders on a crtc that is about to be modeset lose their
	 * connectors_active claim; it is re-established below. */
	for_each_intel_encoder(dev, intel_encoder) {
		if (!intel_encoder->base.crtc)
			continue;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			if (crtc != intel_encoder->base.crtc)
				continue;

			if (crtc_state->enable && needs_modeset(crtc_state))
				intel_encoder->connectors_active = false;

			break;
		}
	}

	drm_atomic_helper_swap_state(state->dev, state);
	intel_modeset_fixup_state(state);

	/* Double check state.
	 */
	for_each_crtc(dev, crtc) {
		WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
	}

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			if (crtc != connector->encoder->crtc)
				continue;

			if (crtc->state->enable && needs_modeset(crtc->state)) {
				struct drm_property *dpms_property =
					dev->mode_config.dpms_property;

				/* a modeset turns the output on: reflect that
				 * in the legacy dpms property */
				connector->dpms = DRM_MODE_DPMS_ON;
				drm_object_property_set_value(&connector->base,
							      dpms_property,
							      DRM_MODE_DPMS_ON);

				intel_encoder = to_intel_encoder(connector->encoder);
				intel_encoder->connectors_active = true;
			}

			break;
		}
	}

}

/* Fuzzy clock comparison: clocks match if they are exactly equal or
 * within roughly 5% of each other; zero only matches zero. */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))

/* Compare sw-tracked @current_config against hw-readback @pipe_config;
 * logs the first mismatch and returns false, or true if they agree. */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config)
{
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != \
	    pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
	if ((current_config->name != pipe_config->name) && \
	    (current_config->alt_name != pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i or %i, found %i)\n", \
			  current_config->name, \
			  current_config->alt_name, \
			  pipe_config->name); \
		return false; \
	}

#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(has_dp_encoder);

	if (INTEL_INFO(dev)->gen < 8) {
		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
		PIPE_CONF_CHECK_I(dp_m_n.link_m);
		PIPE_CONF_CHECK_I(dp_m_n.link_n);
		PIPE_CONF_CHECK_I(dp_m_n.tu);

		if (current_config->has_drrs) {
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
		}
	} else {
		/* BDW+: hw may be on either m/n set, so accept a match with
		 * either the primary or the downclocked (m2/n2) values */
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
	}

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(pipe_src_w);
	PIPE_CONF_CHECK_I(pipe_src_h);

	/*
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
	 * screen. Since we don't yet re-compute the pipe config when moving
	 * just the lvds port away to another pipe the sw tracking won't match.
	 *
	 * Proper atomic modesets with recomputed global state will fix this.
	 * Until then just don't check gmch state for inherited modes.
	 */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
		PIPE_CONF_CHECK_I(gmch_pfit.control);
		/* pfit ratios are autocomputed by the hw on gen4+ */
		if (INTEL_INFO(dev)->gen < 4)
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	}

	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	PIPE_CONF_CHECK_I(scaler_state.scaler_id);

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}

/* Cross-check the sw-tracked SKL DDB (display buffer) allocation against
 * the hw state; logs any mismatching plane or cursor entry. */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	/* DDB allocation tracking only exists on gen9+ */
	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum i915_pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.cursor[pipe];
		sw_entry = &sw_ddb->cursor[pipe];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}

/* Verify each connector's hw state matches the sw tracking. */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

/* Verify each encoder's sw state (crtc link, connectors_active) against
 * the connector list and the hw state reported by ->get_hw_state(). */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* compute enabled/active from the connectors on this encoder */
		for_each_intel_connector(dev, connector) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		/*
		 * for MST connectors if we unplug the connector is gone
		 * away but the encoder is still connected to a crtc
		 * until a modeset happens in response to the hotplug.
		 */
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		I915_STATE_WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		I915_STATE_WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		/* now compare against the hw-reported state */
		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		I915_STATE_WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

/* Verify each crtc's sw state against its encoders and a fresh hw
 * readback of the pipe config. */
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_state pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
			"active crtc, but not enabled in sw tracking\n");

		for_each_intel_encoder(dev, encoder) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		I915_STATE_WARN(active != crtc->active,
		     "crtc's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, crtc->active);
		I915_STATE_WARN(enabled != crtc->base.state->enable,
		     "crtc's computed enabled state doesn't match tracked enabled state "
		     "(expected %i, found %i)\n", enabled,
		     crtc->base.state->enable);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->active;

		/* let the active encoders fill in their part of the config */
		for_each_intel_encoder(dev, encoder) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		I915_STATE_WARN(crtc->active != active,
		     "crtc active state doesn't match with hw state "
		     "(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, crtc->config,
					       "[sw state]");
		}
	}
}

/* Verify shared DPLL refcounts, on/active tracking and hw state against
 * the sw bookkeeping. */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* count crtcs that reference / actively use this pll */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

/* Run the full suite of modeset sw/hw consistency checks. */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_wm_state(dev);
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}

/* Warn if the encoder's idea of the dotclock disagrees (fuzzily) with
 * the one derived from the FDI configuration. */
void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
12392 */ 12393 WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock), 12394 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n", 12395 pipe_config->base.adjusted_mode.crtc_clock, dotclock); 12396 } 12397 12398 static void update_scanline_offset(struct intel_crtc *crtc) 12399 { 12400 struct drm_device *dev = crtc->base.dev; 12401 12402 /* 12403 * The scanline counter increments at the leading edge of hsync. 12404 * 12405 * On most platforms it starts counting from vtotal-1 on the 12406 * first active line. That means the scanline counter value is 12407 * always one less than what we would expect. Ie. just after 12408 * start of vblank, which also occurs at start of hsync (on the 12409 * last active line), the scanline counter will read vblank_start-1. 12410 * 12411 * On gen2 the scanline counter starts counting from 1 instead 12412 * of vtotal-1, so we have to subtract one (or rather add vtotal-1 12413 * to keep the value positive), instead of adding one. 12414 * 12415 * On HSW+ the behaviour of the scanline counter depends on the output 12416 * type. For DP ports it behaves like most other platforms, but on HDMI 12417 * there's an extra 1 line difference. So we need to add two instead of 12418 * one to the value. 
/*
 * Recompute crtc->scanline_offset, the platform-dependent delta between
 * the raw hardware scanline counter and the logical scanline position.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = mode->crtc_vtotal;
		/* Interlaced modes count in field lines, i.e. half of vtotal. */
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
12478 */ 12479 12480 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,"[modeset]"); 12481 12482 ret = drm_atomic_helper_check_planes(state->dev, state); 12483 if (ret) 12484 return ERR_PTR(ret); 12485 12486 return pipe_config; 12487 } 12488 12489 static int __intel_set_mode_setup_plls(struct drm_atomic_state *state) 12490 { 12491 struct drm_device *dev = state->dev; 12492 struct drm_i915_private *dev_priv = to_i915(dev); 12493 unsigned clear_pipes = 0; 12494 struct intel_crtc *intel_crtc; 12495 struct intel_crtc_state *intel_crtc_state; 12496 struct drm_crtc *crtc; 12497 struct drm_crtc_state *crtc_state; 12498 int ret = 0; 12499 int i; 12500 12501 if (!dev_priv->display.crtc_compute_clock) 12502 return 0; 12503 12504 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12505 intel_crtc = to_intel_crtc(crtc); 12506 intel_crtc_state = to_intel_crtc_state(crtc_state); 12507 12508 if (needs_modeset(crtc_state)) { 12509 clear_pipes |= 1 << intel_crtc->pipe; 12510 intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE; 12511 } 12512 } 12513 12514 ret = intel_shared_dpll_start_config(dev_priv, clear_pipes); 12515 if (ret) 12516 goto done; 12517 12518 for_each_crtc_in_state(state, crtc, crtc_state, i) { 12519 if (!needs_modeset(crtc_state) || !crtc_state->enable) 12520 continue; 12521 12522 intel_crtc = to_intel_crtc(crtc); 12523 intel_crtc_state = to_intel_crtc_state(crtc_state); 12524 12525 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 12526 intel_crtc_state); 12527 if (ret) { 12528 intel_shared_dpll_abort_config(dev_priv); 12529 goto done; 12530 } 12531 } 12532 12533 done: 12534 return ret; 12535 } 12536 12537 /* Code that should eventually be part of atomic_check() */ 12538 static int __intel_set_mode_checks(struct drm_atomic_state *state) 12539 { 12540 struct drm_device *dev = state->dev; 12541 int ret; 12542 12543 /* 12544 * See if the config requires any additional preparation, e.g. 12545 * to adjust global state with pipes off. 
/*
 * Commit a checked mode set: disable the affected pipes, swap in the new
 * software state, then re-enable pipes and commit planes.
 *
 * On success this consumes (frees) @state; pipe_config->base.state must be
 * the atomic state the config was computed from.
 */
static int __intel_set_mode(struct drm_crtc *modeset_crtc,
			    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = modeset_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = pipe_config->base.state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0;
	int i;

	ret = __intel_set_mode_checks(state);
	if (ret < 0)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Disable (or fully turn off) every pipe that changes. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (!needs_modeset(crtc_state))
			continue;

		if (!crtc_state->enable) {
			if (crtc->state->enable)
				intel_crtc_disable(crtc);
		} else if (crtc->state->enable) {
			intel_crtc_disable_planes(crtc);
			dev_priv->display.crtc_disable(crtc);
		}
	}

	/* crtc->mode is already used by the ->mode_set callbacks, hence we need
	 * to set it here already despite that we pass it down the callchain.
	 *
	 * Note we'll need to fix this up when we start tracking multiple
	 * pipes; here we assume a single modeset_pipe and only track the
	 * single crtc and mode.
	 */
	if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
		modeset_crtc->mode = pipe_config->base.mode;

		/*
		 * Calculate and store various constants which
		 * are later needed by vblank and swap-completion
		 * timestamping. They are derived from true hwmode.
		 */
		drm_calc_timestamping_constants(modeset_crtc,
						&pipe_config->base.adjusted_mode);
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_state(state);

	/* The state has been swapped above, so state actually contains the
	 * old state now. */

	modeset_update_crtc_power_domains(state);

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (!needs_modeset(crtc->state) || !crtc->state->enable) {
			/* No full modeset: just commit the plane updates. */
			drm_atomic_helper_commit_planes_on_crtc(crtc_state);
			continue;
		}

		update_scanline_offset(to_intel_crtc(crtc));

		dev_priv->display.crtc_enable(crtc);
		drm_atomic_helper_commit_planes_on_crtc(crtc_state);
	}

	/* FIXME: add subpixel order */

	drm_atomic_helper_cleanup_planes(dev, state);

	drm_atomic_state_free(state);

	return 0;
}
0; 12667 12668 pipe_config = intel_modeset_compute_config(crtc, state); 12669 if (IS_ERR(pipe_config)) { 12670 ret = PTR_ERR(pipe_config); 12671 goto out; 12672 } 12673 12674 ret = intel_set_mode_with_config(crtc, pipe_config, force_restore); 12675 if (ret) 12676 goto out; 12677 12678 out: 12679 return ret; 12680 } 12681 12682 void intel_crtc_restore_mode(struct drm_crtc *crtc) 12683 { 12684 struct drm_device *dev = crtc->dev; 12685 struct drm_atomic_state *state; 12686 struct intel_encoder *encoder; 12687 struct intel_connector *connector; 12688 struct drm_connector_state *connector_state; 12689 struct intel_crtc_state *crtc_state; 12690 int ret; 12691 12692 state = drm_atomic_state_alloc(dev); 12693 if (!state) { 12694 DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory", 12695 crtc->base.id); 12696 return; 12697 } 12698 12699 state->acquire_ctx = dev->mode_config.acquire_ctx; 12700 12701 /* The force restore path in the HW readout code relies on the staged 12702 * config still keeping the user requested config while the actual 12703 * state has been overwritten by the configuration read from HW. We 12704 * need to copy the staged config to the atomic state, otherwise the 12705 * mode set will just reapply the state the HW is already in. 
/*
 * Re-apply the staged (user requested) configuration of @crtc through the
 * atomic path, typically after a HW state readout overwrote the live state.
 *
 * Best effort: allocation or staging failures are logged and the function
 * returns without restoring. On failure the locally allocated atomic state
 * is freed here; on success it is consumed by intel_set_mode().
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory",
			      crtc->base.id);
		return;
	}

	/* Caller is expected to hold the modeset locks already. */
	state->acquire_ctx = dev->mode_config.acquire_ctx;

	/* The force restore path in the HW readout code relies on the staged
	 * config still keeping the user requested config while the actual
	 * state has been overwritten by the configuration read from HW. We
	 * need to copy the staged config to the atomic state, otherwise the
	 * mode set will just reapply the state the HW is already in. */
	for_each_intel_encoder(dev, encoder) {
		if (&encoder->new_crtc->base != crtc)
			continue;

		for_each_intel_connector(dev, connector) {
			if (connector->new_encoder != encoder)
				continue;

			connector_state = drm_atomic_get_connector_state(state, &connector->base);
			if (IS_ERR(connector_state)) {
				DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n",
					      connector->base.base.id,
					      connector->base.name,
					      PTR_ERR(connector_state));
				continue;
			}

			/* Route the staged connector back to this crtc. */
			connector_state->crtc = crtc;
			connector_state->best_encoder = &encoder->base;
		}
	}

	crtc_state = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
	if (IS_ERR(crtc_state)) {
		DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
			      crtc->base.id, PTR_ERR(crtc_state));
		drm_atomic_state_free(state);
		return;
	}

	crtc_state->base.active = crtc_state->base.enable =
		to_intel_crtc(crtc)->new_enabled;

	drm_mode_copy(&crtc_state->base.mode, &crtc->mode);

	/* Restore the primary plane to its current fb/position as well. */
	intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
					crtc->primary->fb, crtc->x, crtc->y);

	ret = intel_set_mode(crtc, state, false);
	if (ret)
		drm_atomic_state_free(state);
}
/*
 * Translate a legacy drm_mode_set into connector/crtc atomic state.
 *
 * Adds every connector touched by @set (or currently on set->crtc) to
 * @state, picks encoders, routes connectors to the new crtc (or to nothing
 * when disabling), derives crtc enable/active from the resulting connector
 * count, and finally stages the primary plane and mode.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_modeset_stage_output_state(struct drm_device *dev,
				 struct drm_mode_set *set,
				 struct drm_atomic_state *state)
{
	struct intel_connector *connector;
	struct drm_connector *drm_connector;
	struct drm_connector_state *connector_state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i, ret;

	/* The upper layers ensure that we either disable a crtc or have a list
	 * of connectors. For paranoia, double-check this. */
	WARN_ON(!set->fb && (set->num_connectors != 0));
	WARN_ON(set->fb && (set->num_connectors == 0));

	for_each_intel_connector(dev, connector) {
		bool in_mode_set = intel_connector_in_mode_set(connector, set);

		/* Connector neither requested nor currently on this crtc. */
		if (!in_mode_set && connector->base.state->crtc != set->crtc)
			continue;

		connector_state =
			drm_atomic_get_connector_state(state, &connector->base);
		if (IS_ERR(connector_state))
			return PTR_ERR(connector_state);

		if (in_mode_set) {
			int pipe = to_intel_crtc(set->crtc)->pipe;
			connector_state->best_encoder =
				&intel_find_encoder(connector, pipe)->base;
		}

		if (connector->base.state->crtc != set->crtc)
			continue;

		/* If we disable the crtc, disable all its connectors. Also, if
		 * the connector is on the changing crtc but not on the new
		 * connector list, disable it. */
		if (!set->fb || !in_mode_set) {
			connector_state->best_encoder = NULL;

			DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
				      connector->base.base.id,
				      connector->base.name);
		}
	}
	/* connector->new_encoder is now updated for all connectors. */

	for_each_connector_in_state(state, drm_connector, connector_state, i) {
		connector = to_intel_connector(drm_connector);

		if (!connector_state->best_encoder) {
			/* No encoder -> detach the connector from any crtc. */
			ret = drm_atomic_set_crtc_for_connector(connector_state,
								NULL);
			if (ret)
				return ret;

			continue;
		}

		if (intel_connector_in_mode_set(connector, set)) {
			struct drm_crtc *crtc = connector->base.state->crtc;

			/* If this connector was in a previous crtc, add it
			 * to the state. We might need to disable it. */
			if (crtc) {
				crtc_state =
					drm_atomic_get_crtc_state(state, crtc);
				if (IS_ERR(crtc_state))
					return PTR_ERR(crtc_state);
			}

			ret = drm_atomic_set_crtc_for_connector(connector_state,
								set->crtc);
			if (ret)
				return ret;
		}

		/* Make sure the new CRTC will work with the encoder */
		if (!drm_encoder_crtc_ok(connector_state->best_encoder,
					 connector_state->crtc)) {
			return -EINVAL;
		}

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector_state->crtc->base.id);

		if (connector_state->best_encoder != &connector->encoder->base)
			connector->encoder =
				to_intel_encoder(connector_state->best_encoder);
	}

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		bool has_connectors;

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		/* A crtc is enabled/active iff it still drives a connector. */
		has_connectors = !!drm_atomic_connectors_for_crtc(state, crtc);
		if (has_connectors != crtc_state->enable)
			crtc_state->enable =
				crtc_state->active = has_connectors;
	}

	ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
					      set->fb, set->x, set->y);
	if (ret)
		return ret;

	crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	if (set->mode)
		drm_mode_copy(&crtc_state->mode, set->mode);

	if (set->num_connectors)
		crtc_state->active = true;

	return 0;
}
/*
 * Legacy drm_crtc_funcs.set_config entry point, implemented on top of the
 * atomic machinery: stage the requested output state, compute the pipe
 * config and commit with force_restore=true so SW/HW state gets
 * cross-checked afterwards.
 *
 * Returns 0 on success or a negative error code; the atomic state is freed
 * here on any failure (on success it is consumed by the commit).
 */
static int intel_crtc_set_config(struct drm_mode_set *set)
{
	struct drm_device *dev;
	struct drm_atomic_state *state = NULL;
	struct intel_crtc_state *pipe_config;
	int ret;

	BUG_ON(!set);
	BUG_ON(!set->crtc);
	BUG_ON(!set->crtc->helper_private);

	/* Enforce sane interface api - has been abused by the fb helper. */
	BUG_ON(!set->mode && set->fb);
	BUG_ON(set->fb && set->num_connectors == 0);

	if (set->fb) {
		DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
			      set->crtc->base.id, set->fb->base.id,
			      (int)set->num_connectors, set->x, set->y);
	} else {
		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
	}

	dev = set->crtc->dev;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;

	/* Legacy paths run with the modeset locks already taken. */
	state->acquire_ctx = dev->mode_config.acquire_ctx;

	ret = intel_modeset_stage_output_state(dev, set, state);
	if (ret)
		goto out;

	pipe_config = intel_modeset_compute_config(set->crtc, state);
	if (IS_ERR(pipe_config)) {
		ret = PTR_ERR(pipe_config);
		goto out;
	}

	intel_update_pipe_size(to_intel_crtc(set->crtc));

	ret = intel_set_mode_with_config(set->crtc, pipe_config, true);

	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
			      set->crtc->base.id, ret);
	}

out:
	if (ret)
		drm_atomic_state_free(state);
	return ret;
}
/*
 * Enable an IBX PCH DPLL: program the cached DPLL value, wait for the
 * clocks to stabilize, then write it a second time so the pixel multiplier
 * takes effect (it only latches once the DPLL is already enabled).
 */
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
				struct intel_shared_dpll *pll)
{
	/* PCH refclock must be enabled first */
	ibx_assert_pch_refclk_enabled(dev_priv);

	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(150);

	/* The pixel multiplier can only be updated once the
	 * DPLL is enabled and the clocks are stable.
	 *
	 * So write it again.
	 */
	I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
	POSTING_READ(PCH_DPLL(pll->id));
	udelay(200);
}
*/ 13008 for_each_intel_crtc(dev, crtc) { 13009 if (intel_crtc_to_shared_dpll(crtc) == pll) 13010 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 13011 } 13012 13013 I915_WRITE(PCH_DPLL(pll->id), 0); 13014 POSTING_READ(PCH_DPLL(pll->id)); 13015 udelay(200); 13016 } 13017 13018 static char *ibx_pch_dpll_names[] = { 13019 "PCH DPLL A", 13020 "PCH DPLL B", 13021 }; 13022 13023 static void ibx_pch_dpll_init(struct drm_device *dev) 13024 { 13025 struct drm_i915_private *dev_priv = dev->dev_private; 13026 int i; 13027 13028 dev_priv->num_shared_dpll = 2; 13029 13030 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 13031 dev_priv->shared_dplls[i].id = i; 13032 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; 13033 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; 13034 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; 13035 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; 13036 dev_priv->shared_dplls[i].get_hw_state = 13037 ibx_pch_dpll_get_hw_state; 13038 } 13039 } 13040 13041 static void intel_shared_dpll_init(struct drm_device *dev) 13042 { 13043 struct drm_i915_private *dev_priv = dev->dev_private; 13044 13045 if (HAS_DDI(dev)) 13046 intel_ddi_pll_init(dev); 13047 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 13048 ibx_pch_dpll_init(dev); 13049 else 13050 dev_priv->num_shared_dpll = 0; 13051 13052 BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 13053 } 13054 13055 /** 13056 * intel_wm_need_update - Check whether watermarks need updating 13057 * @plane: drm plane 13058 * @state: new plane state 13059 * 13060 * Check current plane state versus the new one to determine whether 13061 * watermarks need to be recalculated. 13062 * 13063 * Returns true or false. 13064 */ 13065 bool intel_wm_need_update(struct drm_plane *plane, 13066 struct drm_plane_state *state) 13067 { 13068 /* Update watermarks on tiling changes. 
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @fb: framebuffer to prepare for presentation
 * @new_state: new plane state being prepared (passed through to pinning)
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       struct drm_framebuffer *fb,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct intel_plane *intel_plane = to_intel_plane(plane);
	enum i915_pipe pipe = intel_plane->pipe;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
	unsigned frontbuffer_bits = 0;
	int ret = 0;

	/* Nothing to pin/track when no fb is being set. */
	if (!obj)
		return 0;

	/* Pick the frontbuffer-tracking bit for this plane type/pipe. */
	switch (plane->type) {
	case DRM_PLANE_TYPE_PRIMARY:
		frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
		break;
	case DRM_PLANE_TYPE_CURSOR:
		frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
		break;
	case DRM_PLANE_TYPE_OVERLAY:
		frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
		break;
	}

	mutex_lock(&dev->struct_mutex);

	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		/* Old hw needs the cursor in a physically contiguous buffer;
		 * i830 additionally wants 16KiB alignment. */
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
	}

	/* Only move frontbuffer tracking from old to new obj on success. */
	if (ret == 0)
		i915_gem_track_fb(old_obj, obj, frontbuffer_bits);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
16 * 1024 : 256; 13122 ret = i915_gem_object_attach_phys(obj, align); 13123 if (ret) 13124 DRM_DEBUG_KMS("failed to attach phys object\n"); 13125 } else { 13126 ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL); 13127 } 13128 13129 if (ret == 0) 13130 i915_gem_track_fb(old_obj, obj, frontbuffer_bits); 13131 13132 mutex_unlock(&dev->struct_mutex); 13133 13134 return ret; 13135 } 13136 13137 /** 13138 * intel_cleanup_plane_fb - Cleans up an fb after plane use 13139 * @plane: drm plane to clean up for 13140 * @fb: old framebuffer that was on plane 13141 * 13142 * Cleans up a framebuffer that has just been removed from a plane. 13143 */ 13144 void 13145 intel_cleanup_plane_fb(struct drm_plane *plane, 13146 struct drm_framebuffer *fb, 13147 const struct drm_plane_state *old_state) 13148 { 13149 struct drm_device *dev = plane->dev; 13150 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13151 13152 if (WARN_ON(!obj)) 13153 return; 13154 13155 if (plane->type != DRM_PLANE_TYPE_CURSOR || 13156 !INTEL_INFO(dev)->cursor_needs_physical) { 13157 mutex_lock(&dev->struct_mutex); 13158 intel_unpin_fb_obj(fb, old_state); 13159 mutex_unlock(&dev->struct_mutex); 13160 } 13161 } 13162 13163 int 13164 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 13165 { 13166 int max_scale; 13167 struct drm_device *dev; 13168 struct drm_i915_private *dev_priv; 13169 int crtc_clock, cdclk; 13170 13171 if (!intel_crtc || !crtc_state) 13172 return DRM_PLANE_HELPER_NO_SCALING; 13173 13174 dev = intel_crtc->base.dev; 13175 dev_priv = dev->dev_private; 13176 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 13177 cdclk = dev_priv->display.get_display_clock_speed(dev); 13178 13179 if (!crtc_clock || !cdclk) 13180 return DRM_PLANE_HELPER_NO_SCALING; 13181 13182 /* 13183 * skl max scale is lower of: 13184 * close to 3 but not 3, -1 is for that purpose 13185 * or 13186 * cdclk/crtc_clock 13187 */ 13188 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk 
/*
 * Atomic check hook for the primary plane.
 *
 * Clips the plane against the crtc (optionally allowing scaling and
 * positioning on SKL+), and records in intel_crtc->atomic which extra
 * work (FBC/IPS disable, watermark update, vblank waits, frontbuffer
 * flush bits, scaler users) the commit phase must perform.
 *
 * Returns 0 on success or a negative error code.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = state->base.crtc;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	bool can_position = false;
	int max_scale = DRM_PLANE_HELPER_NO_SCALING;
	int min_scale = DRM_PLANE_HELPER_NO_SCALING;
	int ret;

	/* Fall back to the plane's current crtc (legacy/transitional paths). */
	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);
	crtc_state = state->base.state ?
		intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;

	if (INTEL_INFO(dev)->gen >= 9) {
		/* use scaler when colorkey is not required */
		if (to_intel_plane(plane)->ckey.flags == I915_SET_COLORKEY_NONE) {
			min_scale = 1;
			max_scale = skl_max_scale(intel_crtc, crtc_state);
		}
		can_position = true;
	}

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    min_scale,
					    max_scale,
					    can_position, true,
					    &state->visible);
	if (ret)
		return ret;

	if (crtc_state ? crtc_state->base.active : intel_crtc->active) {
		struct intel_plane_state *old_state =
			to_intel_plane_state(plane->state);

		intel_crtc->atomic.wait_for_flips = true;

		/*
		 * FBC does not work on some platforms for rotated
		 * planes, so disable it when rotation is not 0 and
		 * update it when rotation is set back to 0.
		 *
		 * FIXME: This is redundant with the fbc update done in
		 * the primary plane enable function except that that
		 * one is done too late. We eventually need to unify
		 * this.
		 */
		if (state->visible &&
		    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
		    dev_priv->fbc.crtc == intel_crtc &&
		    state->base.rotation != BIT(DRM_ROTATE_0)) {
			intel_crtc->atomic.disable_fbc = true;
		}

		if (state->visible && !old_state->visible) {
			/*
			 * BDW signals flip done immediately if the plane
			 * is disabled, even if the plane enable is already
			 * armed to occur at the next vblank :(
			 */
			if (IS_BROADWELL(dev))
				intel_crtc->atomic.wait_vblank = true;

			if (crtc_state)
				intel_crtc->atomic.post_enable_primary = true;
		}

		/*
		 * FIXME: Actually if we will still have any other plane enabled
		 * on the pipe we could let IPS enabled still, but for
		 * now lets consider that when we make primary invisible
		 * by setting DSPCNTR to 0 on update_primary_plane function
		 * IPS needs to be disable.
		 */
		if (!state->visible || !fb)
			intel_crtc->atomic.disable_ips = true;

		if (!state->visible && old_state->visible &&
		    crtc_state && !needs_modeset(&crtc_state->base))
			intel_crtc->atomic.pre_disable_primary = true;

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);

		intel_crtc->atomic.update_fbc = true;

		if (intel_wm_need_update(plane, &state->base))
			intel_crtc->atomic.update_wm = true;
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		/* Claim/release a pipe scaler for this plane as needed. */
		ret = skl_update_scaler_users(intel_crtc, crtc_state,
			to_intel_plane(plane), state, 0);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Pre-commit hook for a crtc: performs the preparatory work recorded in
 * intel_crtc->atomic by the check phase (frontbuffer untracking for
 * disabled planes, pending-flip waits, FBC/IPS/primary pre-disable,
 * watermark update), takes a runtime-PM reference and starts vblank
 * evasion for the actual register writes.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_plane *intel_plane;
	struct drm_plane *p;
	unsigned fb_bits = 0;

	/* Track fb's for any planes being disabled */
	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(p);

		if (intel_crtc->atomic.disabled_planes &
		    (1 << drm_plane_index(p))) {
			switch (p->type) {
			case DRM_PLANE_TYPE_PRIMARY:
				fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_CURSOR:
				fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
				break;
			case DRM_PLANE_TYPE_OVERLAY:
				fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
				break;
			}

			mutex_lock(&dev->struct_mutex);
			i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	if (intel_crtc->atomic.wait_for_flips)
		intel_crtc_wait_for_pending_flips(crtc);

	if (intel_crtc->atomic.disable_fbc)
		intel_fbc_disable(dev);

	if (intel_crtc->atomic.disable_ips)
		hsw_disable_ips(intel_crtc);

	if (intel_crtc->atomic.pre_disable_primary)
		intel_pre_disable_primary(crtc);

	if (intel_crtc->atomic.update_wm)
		intel_update_watermarks(crtc);

	/* Dropped again in intel_finish_crtc_commit(). */
	intel_runtime_pm_get(dev_priv);

	/* Perform vblank evasion around commit operation */
	if (intel_crtc->active)
		intel_crtc->atomic.evade =
			intel_pipe_update_start(intel_crtc,
						&intel_crtc->atomic.start_vbl_count);
}
/*
 * Post-commit hook for a crtc: ends the vblank evasion started in
 * intel_begin_crtc_commit(), drops the runtime-PM reference and performs
 * the deferred work recorded in intel_crtc->atomic (vblank wait,
 * frontbuffer flip flush, FBC update, primary post-enable, sprite
 * watermark updates), then resets the per-commit atomic scratch state.
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;

	/* Only end evasion if it was successfully started. */
	if (intel_crtc->atomic.evade)
		intel_pipe_update_end(intel_crtc,
				      intel_crtc->atomic.start_vbl_count);

	intel_runtime_pm_put(dev_priv);

	if (intel_crtc->atomic.wait_vblank)
		intel_wait_for_vblank(dev, intel_crtc->pipe);

	intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits);

	if (intel_crtc->atomic.update_fbc) {
		mutex_lock(&dev->struct_mutex);
		intel_fbc_update(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	if (intel_crtc->atomic.post_enable_primary)
		intel_post_enable_primary(crtc);

	drm_for_each_legacy_plane(p, &dev->mode_config.plane_list)
		if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p))
			intel_update_sprite_watermarks(p, crtc, 0, 0, 0,
						       false, false);

	/* Clear the scratch state for the next commit. */
	memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic));
}
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

/* Shared drm_plane_funcs vtable for primary, cursor and sprite planes. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate and register the primary plane for @pipe, selecting the
 * generation-appropriate pixel format list and plane hooks.  Returns the
 * new drm_plane, or NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		/* SKL+ primaries can use the shared pipe scalers. */
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	primary->disable_plane = intel_disable_primary_plane;
	primary->ckey.flags = I915_SET_COLORKEY_NONE;
	/* gen2/3 FBC: plane A feeds pipe B (see intel_crtc_init). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}

/*
 * Lazily create the device-wide rotation property (0/180, plus 90/270 on
 * gen9+) and attach it to @plane with the plane's current rotation as the
 * initial value.
 */
void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
{
	if (!dev->mode_config.rotation_property) {
		unsigned long flags = BIT(DRM_ROTATE_0) |
				      BIT(DRM_ROTATE_180);

		if (INTEL_INFO(dev)->gen >= 9)
			flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);

		dev->mode_config.rotation_property =
			drm_mode_create_rotation_property(dev, flags);
	}
	if (dev->mode_config.rotation_property)
		drm_object_attach_property(&plane->base.base,
					   dev->mode_config.rotation_property,
					   plane->base.state->rotation);
}

/*
 * Atomic check hook for the cursor plane: validates position (no scaling),
 * supported cursor dimensions, backing-object size and tiling, and flags
 * watermark/frontbuffer updates on the owning CRTC.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc;
	unsigned stride;
	int ret;

	crtc = crtc ?
		crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;


	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		goto finish;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Hardware reads a power-of-two stride at 4 bytes per pixel. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}

finish:
	if (intel_crtc->active) {
		/* A size change affects watermarks; recompute on commit. */
		if (plane->state->crtc_w != state->base.crtc_w)
			intel_crtc->atomic.update_wm = true;

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
	}

	return ret;
}

/*
 * Disable the cursor plane.  When not forced (userspace-initiated), also
 * drop the cached cursor BO/address so the next commit reprograms them.
 */
static void
intel_disable_cursor_plane(struct drm_plane *plane,
			   struct drm_crtc *crtc,
			   bool force)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!force) {
		plane->fb = NULL;
		intel_crtc->cursor_bo = NULL;
		intel_crtc->cursor_addr = 0;
	}

	intel_crtc_update_cursor(crtc, false);
}

/*
 * Commit hook for the cursor plane: cache the cursor position, resolve the
 * GGTT (or physical, on platforms that need it) address of the new cursor
 * BO, and program the hardware if the CRTC is active.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = state->base.fb;
	crtc->cursor_x = state->base.crtc_x;
	crtc->cursor_y = state->base.crtc_y;

	/* Same BO as before: only the position needs updating. */
	if (intel_crtc->cursor_bo == obj)
		goto update;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
update:

	if (intel_crtc->active)
		intel_crtc_update_cursor(crtc, state->visible);
}

/*
 * Allocate and register the cursor plane for @pipe (ARGB8888 only), with
 * rotation property (0/180) on gen4+.  Returns NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* Cursor never scales; mark no scaler in use on gen9+. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}

/*
 * Reset all shared pipe scalers of @intel_crtc to unused/dynamic mode and
 * mark no scaler bound to the CRTC itself.
 */
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
	struct intel_crtc_state *crtc_state)
{
	int i;
	struct intel_scaler *intel_scaler;
	struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;

	for (i = 0; i < intel_crtc->num_scalers; i++) {
		intel_scaler = &scaler_state->scalers[i];
		intel_scaler->in_use = 0;
		intel_scaler->id = i;

		intel_scaler->mode = PS_SCALER_MODE_DYN;
	}

	scaler_state->scaler_id = -1;
}

/*
 * Create and register the CRTC for @pipe together with its primary and
 * cursor planes, gamma LUT, scaler state (gen9+) and the pipe/plane ->
 * crtc lookup tables.  All partially constructed objects are torn down on
 * failure; the function reports errors only via the fail path (void).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C has one scaler; A and B have SKL_NUM_SCALERS. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base,
					primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Identity gamma ramp as the starting point. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 = "unknown", forces a full reprogram on first cursor update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}

/*
 * Return the pipe currently driven through @connector's encoder, or
 * INVALID_PIPE if the connector has no active encoder/CRTC.  Caller must
 * hold connection_mutex.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/*
 * Ioctl handler: translate a userspace CRTC id into its hardware pipe.
 * Returns -ENOENT if the CRTC id does not exist.
 */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct
	       drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build the possible_clones bitmask for @encoder: one bit per registered
 * encoder (in iteration order) that can share a CRTC with it.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/*
 * Detect eDP on port A: mobile parts only, DP_A strap set, and (on gen5)
 * not fused off via FUSE_STRAP.
 */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/*
 * Whether this platform can have an analog VGA (CRT) connector at all:
 * ruled out on gen9+, HSW/BDW ULT, CHV, and on VLV when the VBT says the
 * integrated CRT is absent.
 */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * Probe and register every display output (encoder/connector) present on
 * this platform: LVDS/CRT first, then the platform-specific digital port
 * scan (DDI, PCH, VLV/CHV, gen3/4 SDVO/HDMI/DP, gen2 DVO), TV and PSR.
 * Finally compute possible_crtcs/possible_clones for each encoder and set
 * up the PCH reference clock.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found ||
		    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/*
 * drm_framebuffer_funcs.destroy: unregister the fb, drop its GEM object
 * reference (and framebuffer_references count) and free the wrapper.
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/*
 * drm_framebuffer_funcs.create_handle: expose the fb's backing GEM object
 * to @file as a handle.
 */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base,
				     handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};

/*
 * Maximum legal stride in bytes for a primary-plane fb of the given tiling
 * modifier and pixel format on this hardware generation.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		/* "The stride in bytes must not exceed the of the size of 8K
		 *  pixels and 32K bytes."
		 */
		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}

/*
 * Validate a userspace framebuffer description against hardware limits
 * (tiling modifier vs. object tiling, stride alignment and limit, pixel
 * format support per generation, object size) and register the
 * drm_framebuffer.  Returns 0 or a negative error code; takes a
 * framebuffer reference on @obj on success.  Caller holds struct_mutex.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC.
		 */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		/*
		 * NOTE(review): modifier[0] is uint64_t but is printed with
		 * %lx here and below — fine on LP64, wrong on 32-bit
		 * builds; confirm target ABI or cast to unsigned long long.
		 */
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%lx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf are accepted on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%lx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout requires the fence stride and fb pitch to agree. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early.
	 */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)?
	 */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

/*
 * drm_mode_config_funcs.fb_create: resolve the userspace GEM handle and
 * build an intel_framebuffer around it.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/*
	 * NOTE(review): relies on base being the first member of the bo so
	 * that &obj->base == NULL iff the lookup returned NULL; a plain
	 * NULL check on the lookup result would be clearer.
	 */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}

#ifndef CONFIG_DRM_I915_FBDEV
/* Stub when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
};

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* DPLL search routine, per platform family. */
	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* Pipe config / CRTC enable-disable / primary plane vfuncs. */
	if (INTEL_INFO(dev)->gen >= 9
{ 14297 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14298 dev_priv->display.get_initial_plane_config = 14299 skylake_get_initial_plane_config; 14300 dev_priv->display.crtc_compute_clock = 14301 haswell_crtc_compute_clock; 14302 dev_priv->display.crtc_enable = haswell_crtc_enable; 14303 dev_priv->display.crtc_disable = haswell_crtc_disable; 14304 dev_priv->display.off = ironlake_crtc_off; 14305 dev_priv->display.update_primary_plane = 14306 skylake_update_primary_plane; 14307 } else if (HAS_DDI(dev)) { 14308 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14309 dev_priv->display.get_initial_plane_config = 14310 ironlake_get_initial_plane_config; 14311 dev_priv->display.crtc_compute_clock = 14312 haswell_crtc_compute_clock; 14313 dev_priv->display.crtc_enable = haswell_crtc_enable; 14314 dev_priv->display.crtc_disable = haswell_crtc_disable; 14315 dev_priv->display.off = ironlake_crtc_off; 14316 dev_priv->display.update_primary_plane = 14317 ironlake_update_primary_plane; 14318 } else if (HAS_PCH_SPLIT(dev)) { 14319 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 14320 dev_priv->display.get_initial_plane_config = 14321 ironlake_get_initial_plane_config; 14322 dev_priv->display.crtc_compute_clock = 14323 ironlake_crtc_compute_clock; 14324 dev_priv->display.crtc_enable = ironlake_crtc_enable; 14325 dev_priv->display.crtc_disable = ironlake_crtc_disable; 14326 dev_priv->display.off = ironlake_crtc_off; 14327 dev_priv->display.update_primary_plane = 14328 ironlake_update_primary_plane; 14329 } else if (IS_VALLEYVIEW(dev)) { 14330 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14331 dev_priv->display.get_initial_plane_config = 14332 i9xx_get_initial_plane_config; 14333 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14334 dev_priv->display.crtc_enable = valleyview_crtc_enable; 14335 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14336 dev_priv->display.off = i9xx_crtc_off; 14337 
dev_priv->display.update_primary_plane = 14338 i9xx_update_primary_plane; 14339 } else { 14340 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 14341 dev_priv->display.get_initial_plane_config = 14342 i9xx_get_initial_plane_config; 14343 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock; 14344 dev_priv->display.crtc_enable = i9xx_crtc_enable; 14345 dev_priv->display.crtc_disable = i9xx_crtc_disable; 14346 dev_priv->display.off = i9xx_crtc_off; 14347 dev_priv->display.update_primary_plane = 14348 i9xx_update_primary_plane; 14349 } 14350 14351 /* Returns the core display clock speed */ 14352 if (IS_SKYLAKE(dev)) 14353 dev_priv->display.get_display_clock_speed = 14354 skylake_get_display_clock_speed; 14355 else if (IS_BROADWELL(dev)) 14356 dev_priv->display.get_display_clock_speed = 14357 broadwell_get_display_clock_speed; 14358 else if (IS_HASWELL(dev)) 14359 dev_priv->display.get_display_clock_speed = 14360 haswell_get_display_clock_speed; 14361 else if (IS_VALLEYVIEW(dev)) 14362 dev_priv->display.get_display_clock_speed = 14363 valleyview_get_display_clock_speed; 14364 else if (IS_GEN5(dev)) 14365 dev_priv->display.get_display_clock_speed = 14366 ilk_get_display_clock_speed; 14367 else if (IS_I945G(dev) || IS_BROADWATER(dev) || 14368 IS_GEN6(dev) || IS_IVYBRIDGE(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) 14369 dev_priv->display.get_display_clock_speed = 14370 i945_get_display_clock_speed; 14371 else if (IS_I915G(dev)) 14372 dev_priv->display.get_display_clock_speed = 14373 i915_get_display_clock_speed; 14374 else if (IS_I945GM(dev) || IS_845G(dev)) 14375 dev_priv->display.get_display_clock_speed = 14376 i9xx_misc_get_display_clock_speed; 14377 else if (IS_PINEVIEW(dev)) 14378 dev_priv->display.get_display_clock_speed = 14379 pnv_get_display_clock_speed; 14380 else if (IS_I915GM(dev)) 14381 dev_priv->display.get_display_clock_speed = 14382 i915gm_get_display_clock_speed; 14383 else if (IS_I865G(dev)) 14384 
dev_priv->display.get_display_clock_speed = 14385 i865_get_display_clock_speed; 14386 else if (IS_I85X(dev)) 14387 dev_priv->display.get_display_clock_speed = 14388 i855_get_display_clock_speed; 14389 else /* 852, 830 */ 14390 dev_priv->display.get_display_clock_speed = 14391 i830_get_display_clock_speed; 14392 14393 if (IS_GEN5(dev)) { 14394 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 14395 } else if (IS_GEN6(dev)) { 14396 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 14397 } else if (IS_IVYBRIDGE(dev)) { 14398 /* FIXME: detect B0+ stepping and use auto training */ 14399 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 14400 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 14401 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 14402 } else if (IS_VALLEYVIEW(dev)) { 14403 dev_priv->display.modeset_global_resources = 14404 valleyview_modeset_global_resources; 14405 } else if (IS_BROXTON(dev)) { 14406 dev_priv->display.modeset_global_resources = 14407 broxton_modeset_global_resources; 14408 } 14409 14410 switch (INTEL_INFO(dev)->gen) { 14411 case 2: 14412 dev_priv->display.queue_flip = intel_gen2_queue_flip; 14413 break; 14414 14415 case 3: 14416 dev_priv->display.queue_flip = intel_gen3_queue_flip; 14417 break; 14418 14419 case 4: 14420 case 5: 14421 dev_priv->display.queue_flip = intel_gen4_queue_flip; 14422 break; 14423 14424 case 6: 14425 dev_priv->display.queue_flip = intel_gen6_queue_flip; 14426 break; 14427 case 7: 14428 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 14429 dev_priv->display.queue_flip = intel_gen7_queue_flip; 14430 break; 14431 case 9: 14432 /* Drop through - unsupported since execlist only. 
	 */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	intel_panel_init_backlight_funcs(dev);

	/* Panel power sequencer lock; LK_CANRECURSE allows recursive entry. */
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}

/* Same as above, but for machines that need pipe B kept alive. */
static void quirk_pipeb_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEB_FORCE;
	DRM_INFO("applying pipe b force quirk\n");
}

/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}

/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}

/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}

/* One entry per quirked machine, matched on PCI device/subsystem IDs. */
struct intel_quirk {
	int device;			/* PCI device ID */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};

/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};

/* DMI callback: log the match; returning 1 counts it as a hit. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};

static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
};

/*
 * Walk both quirk tables (PCI-ID matched and DMI matched) and apply every
 * hook that matches this device. Called once during modeset init.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct device *d = dev->dev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		if
		    (pci_get_device(d) == q->device &&
		    (pci_get_subvendor(d) == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (pci_get_subdevice(d) == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}

/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
#if 0
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	/* Read-modify-write VGA sequencer register SR01 through the legacy
	 * index/data port pair; setting bit 5 turns the VGA screen off. */
	outb(VGA_SR_INDEX, SR01);
	sr1 = inb(VGA_SR_DATA);
	outb(VGA_SR_DATA, sr1 | 1 << 5);
#if 0
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
#endif
	udelay(300);

	/*
	 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
	 * from S3 without preserving (some of?) the other bits.
	 */
	I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}

/* Bring up the parts of display HW state that survive across mode sets. */
void intel_modeset_init_hw(struct drm_device *dev)
{
	intel_prepare_ddi(dev);

	if (IS_VALLEYVIEW(dev))
		vlv_update_cdclk(dev);

	intel_init_clock_gating(dev);

	intel_enable_gt_powersave(dev);
}

/*
 * One-time modeset initialization: set up the DRM mode config limits,
 * quirks, power management, CRTCs/planes per pipe, shared DPLLs, outputs,
 * and finally read out / sanitize the state the BIOS left behind.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int sprite, ret;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do for display-less chips. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	intel_init_display(dev);
	intel_init_audio(dev);

	/* Maximum framebuffer dimensions grow with the hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = dev_priv->gtt.mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a CRTC per pipe and a plane per (pipe, sprite). */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_init_dpio(dev);

	intel_shared_dpll_init(dev);

	/* save the BIOS value before clobbering it */
	dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Just in case the BIOS is doing something questionable. */
	intel_fbc_disable(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev, false);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		if (dev_priv->display.get_initial_plane_config) {
			dev_priv->display.get_initial_plane_config(crtc,
							   &crtc->plane_config);
			/*
			 * If the fb is shared between multiple heads, we'll
			 * just get the first one.
			 */
			intel_find_initial_plane_obj(crtc, &crtc->plane_config);
		}
	}
}

/*
 * Force pipe A on by briefly enabling a load-detect pipe on the first
 * analog (CRT) connector found. Used by the pipe A quirk path below.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	/* No analog connector registered: nothing we can do. */
	if (!crt)
		return;

	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}

/*
 * Returns false when the OTHER plane is enabled and selects this crtc's
 * pipe, i.e. the BIOS left a crossed plane->pipe mapping (gen < 4 only;
 * gen4+ has a fixed mapping so this cannot happen there).
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg, val;

	/* With a single pipe there is nothing to cross-map. */
	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	reg = DSPCNTR(!crtc->plane);
	val = I915_READ(reg);

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}

/*
 * Bring one crtc's software state into agreement with the hardware state
 * that was just read out, fixing up anything the BIOS left inconsistent.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg;

	/* Clear any frame start delays used for debugging left by the BIOS */
	reg = PIPECONF(crtc->config->cpu_transcoder);
	I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		update_scanline_offset(crtc);
		drm_crtc_vblank_on(&crtc->base);
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		struct intel_connector *connector;
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_planes(&crtc->base);
		dev_priv->display.crtc_disable(&crtc->base);
		crtc->plane = plane;

		/* ... and break all links. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder->base.crtc != &crtc->base)
				continue;

			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		/* multiple connectors may have the same encoder:
		 *  handle them and break crtc link separately */
		for_each_intel_connector(dev, connector)
			if (connector->encoder->base.crtc == &crtc->base) {
				connector->encoder->base.crtc = NULL;
				connector->encoder->connectors_active = false;
			}

		WARN_ON(crtc->active);
		crtc->base.state->enable = false;
		crtc->base.state->active = false;
		crtc->base.enabled = false;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	intel_crtc_update_dpms(&crtc->base);

	if (crtc->active != crtc->base.state->enable) {
		struct intel_encoder *encoder;

		/* This can happen either due to bugs in the get_hw_state
		 * functions or because the pipe is force-enabled due to the
		 * pipe A quirk. */
		DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n",
			      crtc->base.base.id,
			      crtc->base.state->enable ? "enabled" : "disabled",
			      crtc->active ? "enabled" : "disabled");

		crtc->base.state->enable = crtc->active;
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		/* Because we only establish the connector -> encoder ->
		 * crtc links if something is active, this means the
		 * crtc is now deactivated. Break the links. connector
		 * -> encoder links are only establish when things are
		 * actually up, hence no need to break them. */
		WARN_ON(crtc->active);

		for_each_encoder_on_crtc(dev, &crtc->base, encoder) {
			WARN_ON(encoder->connectors_active);
			encoder->base.crtc = NULL;
		}
	}

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}

/*
 * Fix up encoders whose read-out state is self-contradictory (active
 * connectors but no active pipe), typically after a botched resume.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (encoder->connectors_active && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again.
		 */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;
		encoder->connectors_active = false;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}

/* Re-disable the VGA plane if something (e.g. the BIOS) turned it back on. */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}

void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);
}

/* Read whether this crtc's primary plane is enabled in hardware. */
static bool primary_get_hw_state(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;

	/* An inactive crtc cannot have an enabled primary plane. */
	if (!crtc->active)
		return false;

	return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE;
}

/*
 * Read the current hardware modeset state (crtcs, shared DPLLs, encoders,
 * connectors) into the software tracking structures. Sanitization of any
 * inconsistencies is done separately afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	for_each_intel_crtc(dev, crtc) {
		struct drm_plane *primary = crtc->base.primary;
		struct intel_plane_state *plane_state;

		memset(crtc->config, 0, sizeof(*crtc->config));
		crtc->config->base.crtc = &crtc->base;

		/* Mark the config as inherited from the BIOS/firmware. */
		crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;

		crtc->active = dev_priv->display.get_pipe_config(crtc,
								 crtc->config);

		crtc->base.state->enable = crtc->active;
		crtc->base.state->active = crtc->active;
		crtc->base.enabled = crtc->active;

		plane_state = to_intel_plane_state(primary->state);
		plane_state->visible = primary_get_hw_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->get_hw_state(dev_priv, pll,
					    &pll->config.hw_state);
		pll->active = 0;
		pll->config.crtc_mask = 0;
		/* Rebuild the crtc mask from the active crtcs using this pll. */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
				pll->active++;
				pll->config.crtc_mask |= 1 << crtc->pipe;
			}
		}

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);

		if (pll->config.crtc_mask)
			intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
	}

	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		encoder->connectors_active = false;
		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;
			connector->encoder->connectors_active = true;
			connector->base.encoder = &connector->encoder->base;
		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ?
			      "enabled" : "disabled");
	}
}

/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
 * and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev,
				  bool force_restore)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/*
	 * Now that we have the config, copy it to each CRTC struct
	 * Note that this could go away if we move to using crtc_config
	 * checking everywhere.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active && i915.fastboot) {
			intel_mode_from_pipe_config(&crtc->base.mode,
						    crtc->config);
			DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
				      crtc->base.base.id);
			drm_mode_debug_printmodeline(&crtc->base.mode);
		}
	}

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Shut down any shared DPLL that is on but has no users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->disable(dev_priv, pll);
		pll->on = false;
	}

	if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	if (force_restore) {
		i915_redisable_vga(dev);

		/*
		 * We need to use raw interfaces for restoring state to avoid
		 * checking (bogus) intermediate states.
		 */
		for_each_pipe(dev_priv, pipe) {
			struct drm_crtc *crtc =
				dev_priv->pipe_to_crtc_mapping[pipe];

			intel_crtc_restore_mode(crtc);
		}
	} else {
		intel_modeset_update_staged_output_state(dev);
	}

	intel_modeset_check_state(dev);
}

/*
 * Late modeset init that requires GEM to be up: GT power management,
 * SSC takeover from the BIOS, overlay, and pinning the boot framebuffers.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	mutex_lock(&dev->struct_mutex);
	intel_init_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
						DREF_SSC1_ENABLE);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary,
						 c->primary->fb,
						 c->primary->state,
						 NULL);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Drop the fb rather than boot with an unpinned one. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			update_state_fb(c->primary);
		}
	}

	intel_backlight_register(dev);
}

/* Tear down one connector: its backlight first, then the sysfs entry. */
void intel_connector_unregister(struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;

	intel_panel_destroy_backlight(connector);
	drm_connector_unregister(connector);
}

/*
 * Full modeset teardown, mirroring intel_modeset_init/gem_init in reverse
 * order: interrupts and polling first, then connectors, mode config, overlay
 * and GT power management.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_connector *connector;

	intel_disable_gt_powersave(dev);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	mutex_lock(&dev->struct_mutex);

	intel_unregister_dsm_handler();

	intel_fbc_disable(dev);

	mutex_unlock(&dev->struct_mutex);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct intel_connector *intel_connector;

		intel_connector = to_intel_connector(connector);
		intel_connector->unregister(intel_connector);
	}

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev);

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_gt_powersave(dev);
	mutex_unlock(&dev->struct_mutex);
}

/*
 * Return which encoder is currently attached for connector.
 */
struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
{
	return &intel_attached_encoder(connector)->base;
}

/* Link a connector to its encoder, both in i915 and DRM core state. */
void intel_connector_attach_encoder(struct intel_connector *connector,
				    struct intel_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base,
					  &encoder->base);
}

/*
 * set vga decode state - true == enable VGA decode
 */
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* GMCH control register moved on SNB+; pick the right config offset. */
	unsigned reg = INTEL_INFO(dev)->gen >= 6 ?
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Already in the requested state: nothing to write. */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

#if 0
/* Snapshot of display registers taken at GPU error time (disabled in this
 * port; see the #if 0 above). */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* false means the registers below were not read */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/*
 * Capture the current display register state for error reporting.
 * Called from error-interrupt context, hence the GFP_ATOMIC allocation.
 * Returns NULL on display-less hardware or allocation failure.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Skip pipes whose power domain is off - reads would be junk. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP.
*/ 15423 15424 for (i = 0; i < error->num_transcoders; i++) { 15425 enum transcoder cpu_transcoder = transcoders[i]; 15426 15427 error->transcoder[i].power_domain_on = 15428 __intel_display_power_is_enabled(dev_priv, 15429 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 15430 if (!error->transcoder[i].power_domain_on) 15431 continue; 15432 15433 error->transcoder[i].cpu_transcoder = cpu_transcoder; 15434 15435 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder)); 15436 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder)); 15437 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder)); 15438 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder)); 15439 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder)); 15440 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder)); 15441 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder)); 15442 } 15443 15444 return error; 15445 } 15446 15447 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__) 15448 15449 void 15450 intel_display_print_error_state(struct drm_i915_error_state_buf *m, 15451 struct drm_device *dev, 15452 struct intel_display_error_state *error) 15453 { 15454 struct drm_i915_private *dev_priv = dev->dev_private; 15455 int i; 15456 15457 if (!error) 15458 return; 15459 15460 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes); 15461 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 15462 err_printf(m, "PWR_WELL_CTL2: %08x\n", 15463 error->power_well_driver); 15464 for_each_pipe(dev_priv, i) { 15465 err_printf(m, "Pipe [%d]:\n", i); 15466 err_printf(m, " Power: %s\n", 15467 error->pipe[i].power_domain_on ? 
"on" : "off"); 15468 err_printf(m, " SRC: %08x\n", error->pipe[i].source); 15469 err_printf(m, " STAT: %08x\n", error->pipe[i].stat); 15470 15471 err_printf(m, "Plane [%d]:\n", i); 15472 err_printf(m, " CNTR: %08x\n", error->plane[i].control); 15473 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride); 15474 if (INTEL_INFO(dev)->gen <= 3) { 15475 err_printf(m, " SIZE: %08x\n", error->plane[i].size); 15476 err_printf(m, " POS: %08x\n", error->plane[i].pos); 15477 } 15478 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 15479 err_printf(m, " ADDR: %08x\n", error->plane[i].addr); 15480 if (INTEL_INFO(dev)->gen >= 4) { 15481 err_printf(m, " SURF: %08x\n", error->plane[i].surface); 15482 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); 15483 } 15484 15485 err_printf(m, "Cursor [%d]:\n", i); 15486 err_printf(m, " CNTR: %08x\n", error->cursor[i].control); 15487 err_printf(m, " POS: %08x\n", error->cursor[i].position); 15488 err_printf(m, " BASE: %08x\n", error->cursor[i].base); 15489 } 15490 15491 for (i = 0; i < error->num_transcoders; i++) { 15492 err_printf(m, "CPU transcoder: %c\n", 15493 transcoder_name(error->transcoder[i].cpu_transcoder)); 15494 err_printf(m, " Power: %s\n", 15495 error->transcoder[i].power_domain_on ? 
"on" : "off"); 15496 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf); 15497 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal); 15498 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank); 15499 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync); 15500 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal); 15501 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank); 15502 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync); 15503 } 15504 } 15505 #endif 15506 15507 void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file) 15508 { 15509 struct intel_crtc *crtc; 15510 15511 for_each_intel_crtc(dev, crtc) { 15512 struct intel_unpin_work *work; 15513 15514 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 15515 15516 work = crtc->unpin_work; 15517 15518 if (work && work->event && 15519 work->event->base.file_priv == file) { 15520 kfree(work->event); 15521 work->event = NULL; 15522 } 15523 15524 lockmgr(&dev->event_lock, LK_RELEASE); 15525 } 15526 } 15527