1 /* 2 * Copyright © 2006-2007 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 * DEALINGS IN THE SOFTWARE. 
22 * 23 * Authors: 24 * Eric Anholt <eric@anholt.net> 25 */ 26 27 #include <linux/dmi.h> 28 #include <linux/module.h> 29 #include <linux/i2c.h> 30 #include <linux/kernel.h> 31 #include <drm/drm_edid.h> 32 #include <drm/drmP.h> 33 #include "intel_drv.h" 34 #include <drm/i915_drm.h> 35 #include "i915_drv.h" 36 #include "i915_trace.h" 37 #include <drm/drm_atomic.h> 38 #include <drm/drm_atomic_helper.h> 39 #include <drm/drm_dp_helper.h> 40 #include <drm/drm_crtc_helper.h> 41 #include <drm/drm_plane_helper.h> 42 #include <drm/drm_rect.h> 43 44 /* Primary plane formats supported by all gen */ 45 #define COMMON_PRIMARY_FORMATS \ 46 DRM_FORMAT_C8, \ 47 DRM_FORMAT_RGB565, \ 48 DRM_FORMAT_XRGB8888, \ 49 DRM_FORMAT_ARGB8888 50 51 /* Primary plane formats for gen <= 3 */ 52 static const uint32_t intel_primary_formats_gen2[] = { 53 COMMON_PRIMARY_FORMATS, 54 DRM_FORMAT_XRGB1555, 55 DRM_FORMAT_ARGB1555, 56 }; 57 58 /* Primary plane formats for gen >= 4 */ 59 static const uint32_t intel_primary_formats_gen4[] = { 60 COMMON_PRIMARY_FORMATS, \ 61 DRM_FORMAT_XBGR8888, 62 DRM_FORMAT_ABGR8888, 63 DRM_FORMAT_XRGB2101010, 64 DRM_FORMAT_ARGB2101010, 65 DRM_FORMAT_XBGR2101010, 66 DRM_FORMAT_ABGR2101010, 67 }; 68 69 /* Cursor formats */ 70 static const uint32_t intel_cursor_formats[] = { 71 DRM_FORMAT_ARGB8888, 72 }; 73 74 static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 75 76 static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 77 struct intel_crtc_state *pipe_config); 78 static void ironlake_pch_clock_get(struct intel_crtc *crtc, 79 struct intel_crtc_state *pipe_config); 80 81 static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, 82 int x, int y, struct drm_framebuffer *old_fb, 83 struct drm_atomic_state *state); 84 static int intel_framebuffer_init(struct drm_device *dev, 85 struct intel_framebuffer *ifb, 86 struct drm_mode_fb_cmd2 *mode_cmd, 87 struct drm_i915_gem_object *obj); 88 static void i9xx_set_pipeconf(struct intel_crtc 
*intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void intel_set_pipe_csc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *crtc);
static void intel_finish_crtc_commit(struct drm_crtc *crtc);

/*
 * Return the encoder driving @connector: the connector's own encoder for a
 * regular connector, or the per-pipe encoder of its MST port for DP MST.
 */
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
	if (!connector->mst_port)
		return connector->encoder;
	else
		return &connector->mst_port->mst_encoders[pipe]->base;
}

/* Inclusive [min, max] bound for a single PLL divider. */
typedef struct {
	int min, max;
} intel_range_t;

/* Post-divider selection: p2_slow below dot_limit, p2_fast at or above it. */
typedef struct {
	int dot_limit;
	int p2_slow, p2_fast;
} intel_p2_t;

typedef struct intel_limit intel_limit_t;
struct intel_limit {
	intel_range_t   dot, vco, n, m, m1, m2, p, p1;
	intel_p2_t	    p2;
};

/*
 * Read the raw clock frequency field from the PCH; only meaningful on
 * PCH-split platforms (WARNs otherwise).
 */
int
intel_pch_rawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(!HAS_PCH_SPLIT(dev));

	return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}

/* On gen5 the FDI link frequency comes from a BIOS-programmed register;
 * later gens use a fixed 2.7 GHz link (27 in units of 100MHz). */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_device *dev)
{
	if (IS_GEN5(dev)) {
		struct drm_i915_private *dev_priv = dev->dev_private;
		return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
	} else
		return 27;
}

static const intel_limit_t intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96,
.max = 140 }, 151 .m1 = { .min = 18, .max = 26 }, 152 .m2 = { .min = 6, .max = 16 }, 153 .p = { .min = 4, .max = 128 }, 154 .p1 = { .min = 2, .max = 33 }, 155 .p2 = { .dot_limit = 165000, 156 .p2_slow = 4, .p2_fast = 2 }, 157 }; 158 159 static const intel_limit_t intel_limits_i8xx_dvo = { 160 .dot = { .min = 25000, .max = 350000 }, 161 .vco = { .min = 908000, .max = 1512000 }, 162 .n = { .min = 2, .max = 16 }, 163 .m = { .min = 96, .max = 140 }, 164 .m1 = { .min = 18, .max = 26 }, 165 .m2 = { .min = 6, .max = 16 }, 166 .p = { .min = 4, .max = 128 }, 167 .p1 = { .min = 2, .max = 33 }, 168 .p2 = { .dot_limit = 165000, 169 .p2_slow = 4, .p2_fast = 4 }, 170 }; 171 172 static const intel_limit_t intel_limits_i8xx_lvds = { 173 .dot = { .min = 25000, .max = 350000 }, 174 .vco = { .min = 908000, .max = 1512000 }, 175 .n = { .min = 2, .max = 16 }, 176 .m = { .min = 96, .max = 140 }, 177 .m1 = { .min = 18, .max = 26 }, 178 .m2 = { .min = 6, .max = 16 }, 179 .p = { .min = 4, .max = 128 }, 180 .p1 = { .min = 1, .max = 6 }, 181 .p2 = { .dot_limit = 165000, 182 .p2_slow = 14, .p2_fast = 7 }, 183 }; 184 185 static const intel_limit_t intel_limits_i9xx_sdvo = { 186 .dot = { .min = 20000, .max = 400000 }, 187 .vco = { .min = 1400000, .max = 2800000 }, 188 .n = { .min = 1, .max = 6 }, 189 .m = { .min = 70, .max = 120 }, 190 .m1 = { .min = 8, .max = 18 }, 191 .m2 = { .min = 3, .max = 7 }, 192 .p = { .min = 5, .max = 80 }, 193 .p1 = { .min = 1, .max = 8 }, 194 .p2 = { .dot_limit = 200000, 195 .p2_slow = 10, .p2_fast = 5 }, 196 }; 197 198 static const intel_limit_t intel_limits_i9xx_lvds = { 199 .dot = { .min = 20000, .max = 400000 }, 200 .vco = { .min = 1400000, .max = 2800000 }, 201 .n = { .min = 1, .max = 6 }, 202 .m = { .min = 70, .max = 120 }, 203 .m1 = { .min = 8, .max = 18 }, 204 .m2 = { .min = 3, .max = 7 }, 205 .p = { .min = 7, .max = 98 }, 206 .p1 = { .min = 1, .max = 8 }, 207 .p2 = { .dot_limit = 112000, 208 .p2_slow = 14, .p2_fast = 7 }, 209 }; 210 211 212 static const 
intel_limit_t intel_limits_g4x_sdvo = { 213 .dot = { .min = 25000, .max = 270000 }, 214 .vco = { .min = 1750000, .max = 3500000}, 215 .n = { .min = 1, .max = 4 }, 216 .m = { .min = 104, .max = 138 }, 217 .m1 = { .min = 17, .max = 23 }, 218 .m2 = { .min = 5, .max = 11 }, 219 .p = { .min = 10, .max = 30 }, 220 .p1 = { .min = 1, .max = 3}, 221 .p2 = { .dot_limit = 270000, 222 .p2_slow = 10, 223 .p2_fast = 10 224 }, 225 }; 226 227 static const intel_limit_t intel_limits_g4x_hdmi = { 228 .dot = { .min = 22000, .max = 400000 }, 229 .vco = { .min = 1750000, .max = 3500000}, 230 .n = { .min = 1, .max = 4 }, 231 .m = { .min = 104, .max = 138 }, 232 .m1 = { .min = 16, .max = 23 }, 233 .m2 = { .min = 5, .max = 11 }, 234 .p = { .min = 5, .max = 80 }, 235 .p1 = { .min = 1, .max = 8}, 236 .p2 = { .dot_limit = 165000, 237 .p2_slow = 10, .p2_fast = 5 }, 238 }; 239 240 static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 241 .dot = { .min = 20000, .max = 115000 }, 242 .vco = { .min = 1750000, .max = 3500000 }, 243 .n = { .min = 1, .max = 3 }, 244 .m = { .min = 104, .max = 138 }, 245 .m1 = { .min = 17, .max = 23 }, 246 .m2 = { .min = 5, .max = 11 }, 247 .p = { .min = 28, .max = 112 }, 248 .p1 = { .min = 2, .max = 8 }, 249 .p2 = { .dot_limit = 0, 250 .p2_slow = 14, .p2_fast = 14 251 }, 252 }; 253 254 static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 255 .dot = { .min = 80000, .max = 224000 }, 256 .vco = { .min = 1750000, .max = 3500000 }, 257 .n = { .min = 1, .max = 3 }, 258 .m = { .min = 104, .max = 138 }, 259 .m1 = { .min = 17, .max = 23 }, 260 .m2 = { .min = 5, .max = 11 }, 261 .p = { .min = 14, .max = 42 }, 262 .p1 = { .min = 2, .max = 6 }, 263 .p2 = { .dot_limit = 0, 264 .p2_slow = 7, .p2_fast = 7 265 }, 266 }; 267 268 static const intel_limit_t intel_limits_pineview_sdvo = { 269 .dot = { .min = 20000, .max = 400000}, 270 .vco = { .min = 1700000, .max = 3500000 }, 271 /* Pineview's Ncounter is a ring counter */ 272 .n = { .min = 3, .max = 6 }, 273 
.m = { .min = 2, .max = 256 }, 274 /* Pineview only has one combined m divider, which we treat as m2. */ 275 .m1 = { .min = 0, .max = 0 }, 276 .m2 = { .min = 0, .max = 254 }, 277 .p = { .min = 5, .max = 80 }, 278 .p1 = { .min = 1, .max = 8 }, 279 .p2 = { .dot_limit = 200000, 280 .p2_slow = 10, .p2_fast = 5 }, 281 }; 282 283 static const intel_limit_t intel_limits_pineview_lvds = { 284 .dot = { .min = 20000, .max = 400000 }, 285 .vco = { .min = 1700000, .max = 3500000 }, 286 .n = { .min = 3, .max = 6 }, 287 .m = { .min = 2, .max = 256 }, 288 .m1 = { .min = 0, .max = 0 }, 289 .m2 = { .min = 0, .max = 254 }, 290 .p = { .min = 7, .max = 112 }, 291 .p1 = { .min = 1, .max = 8 }, 292 .p2 = { .dot_limit = 112000, 293 .p2_slow = 14, .p2_fast = 14 }, 294 }; 295 296 /* Ironlake / Sandybridge 297 * 298 * We calculate clock using (register_value + 2) for N/M1/M2, so here 299 * the range value for them is (actual_value - 2). 300 */ 301 static const intel_limit_t intel_limits_ironlake_dac = { 302 .dot = { .min = 25000, .max = 350000 }, 303 .vco = { .min = 1760000, .max = 3510000 }, 304 .n = { .min = 1, .max = 5 }, 305 .m = { .min = 79, .max = 127 }, 306 .m1 = { .min = 12, .max = 22 }, 307 .m2 = { .min = 5, .max = 9 }, 308 .p = { .min = 5, .max = 80 }, 309 .p1 = { .min = 1, .max = 8 }, 310 .p2 = { .dot_limit = 225000, 311 .p2_slow = 10, .p2_fast = 5 }, 312 }; 313 314 static const intel_limit_t intel_limits_ironlake_single_lvds = { 315 .dot = { .min = 25000, .max = 350000 }, 316 .vco = { .min = 1760000, .max = 3510000 }, 317 .n = { .min = 1, .max = 3 }, 318 .m = { .min = 79, .max = 118 }, 319 .m1 = { .min = 12, .max = 22 }, 320 .m2 = { .min = 5, .max = 9 }, 321 .p = { .min = 28, .max = 112 }, 322 .p1 = { .min = 2, .max = 8 }, 323 .p2 = { .dot_limit = 225000, 324 .p2_slow = 14, .p2_fast = 14 }, 325 }; 326 327 static const intel_limit_t intel_limits_ironlake_dual_lvds = { 328 .dot = { .min = 25000, .max = 350000 }, 329 .vco = { .min = 1760000, .max = 3510000 }, 330 .n = { .min = 1, 
.max = 3 }, 331 .m = { .min = 79, .max = 127 }, 332 .m1 = { .min = 12, .max = 22 }, 333 .m2 = { .min = 5, .max = 9 }, 334 .p = { .min = 14, .max = 56 }, 335 .p1 = { .min = 2, .max = 8 }, 336 .p2 = { .dot_limit = 225000, 337 .p2_slow = 7, .p2_fast = 7 }, 338 }; 339 340 /* LVDS 100mhz refclk limits. */ 341 static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 342 .dot = { .min = 25000, .max = 350000 }, 343 .vco = { .min = 1760000, .max = 3510000 }, 344 .n = { .min = 1, .max = 2 }, 345 .m = { .min = 79, .max = 126 }, 346 .m1 = { .min = 12, .max = 22 }, 347 .m2 = { .min = 5, .max = 9 }, 348 .p = { .min = 28, .max = 112 }, 349 .p1 = { .min = 2, .max = 8 }, 350 .p2 = { .dot_limit = 225000, 351 .p2_slow = 14, .p2_fast = 14 }, 352 }; 353 354 static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 355 .dot = { .min = 25000, .max = 350000 }, 356 .vco = { .min = 1760000, .max = 3510000 }, 357 .n = { .min = 1, .max = 3 }, 358 .m = { .min = 79, .max = 126 }, 359 .m1 = { .min = 12, .max = 22 }, 360 .m2 = { .min = 5, .max = 9 }, 361 .p = { .min = 14, .max = 42 }, 362 .p1 = { .min = 2, .max = 6 }, 363 .p2 = { .dot_limit = 225000, 364 .p2_slow = 7, .p2_fast = 7 }, 365 }; 366 367 static const intel_limit_t intel_limits_vlv = { 368 /* 369 * These are the data rate limits (measured in fast clocks) 370 * since those are the strictest limits we have. The fast 371 * clock and actual rate limits are more relaxed, so checking 372 * them would make no difference. 373 */ 374 .dot = { .min = 25000 * 5, .max = 270000 * 5 }, 375 .vco = { .min = 4000000, .max = 6000000 }, 376 .n = { .min = 1, .max = 7 }, 377 .m1 = { .min = 2, .max = 3 }, 378 .m2 = { .min = 11, .max = 156 }, 379 .p1 = { .min = 2, .max = 3 }, 380 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 381 }; 382 383 static const intel_limit_t intel_limits_chv = { 384 /* 385 * These are the data rate limits (measured in fast clocks) 386 * since those are the strictest limits we have. 
The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = {	.p2_slow = 1, .p2_fast = 14 },
};

/*
 * Compute VCO and dot clock from VLV dividers:
 * vco = refclk * (m1 * m2) / n, dot = vco / (p1 * p2).
 * WARNs and leaves vco/dot untouched on a zero divisor.
 */
static void vlv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after a staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
 * encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for (i = 0; i < state->num_connector; i++) {
		if (!state->connectors[i])
			continue;

		connector_state = state->connector_states[i];
		/* Only consider connectors staged for this CRTC. */
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	/* Callers are expected to ask only about pipes with connectors. */
	WARN_ON(num_connectors == 0);

	return false;
}

/*
 * Select the ILK/SNB PLL limit table: LVDS gets single/dual-link variants
 * (with dedicated tables for a 100 kHz-unit refclk of 100000, i.e. 100 MHz),
 * everything else uses the DAC table.
 */
static const intel_limit_t *
intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else
		limit = &intel_limits_ironlake_dac;

	return limit;
}

/* Select the G4X PLL limit table from the staged output type. */
static const intel_limit_t *
intel_g4x_limit(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			limit = &intel_limits_g4x_dual_channel_lvds;
		else
			limit = &intel_limits_g4x_single_channel_lvds;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
		   intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
		limit = &intel_limits_g4x_hdmi;
	}
else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
		limit = &intel_limits_g4x_sdvo;
	} else /* The option is for other outputs */
		limit = &intel_limits_i9xx_sdvo;

	return limit;
}

/* Pick the PLL limit table matching the platform and staged output type. */
static const intel_limit_t *
intel_limit(struct intel_crtc_state *crtc_state, int refclk)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	const intel_limit_t *limit;

	if (HAS_PCH_SPLIT(dev))
		limit = intel_ironlake_limit(crtc_state, refclk);
	else if (IS_G4X(dev)) {
		limit = intel_g4x_limit(crtc_state);
	} else if (IS_PINEVIEW(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_pineview_lvds;
		else
			limit = &intel_limits_pineview_sdvo;
	} else if (IS_CHERRYVIEW(dev)) {
		limit = &intel_limits_chv;
	} else if (IS_VALLEYVIEW(dev)) {
		limit = &intel_limits_vlv;
	} else if (!IS_GEN2(dev)) {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i9xx_lvds;
		else
			limit = &intel_limits_i9xx_sdvo;
	} else {
		if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
			limit = &intel_limits_i8xx_lvds;
		else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
			limit = &intel_limits_i8xx_dvo;
		else
			limit = &intel_limits_i8xx_dac;
	}
	return limit;
}

/* m1 is reserved as 0 in Pineview, n is a ring counter */
static void pineview_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Effective i9xx feedback divider; register fields are value - 2. */
static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

/* Compute VCO and dot clock from i9xx-style dividers (n field is n - 2). */
static void i9xx_clock(int refclk, intel_clock_t *clock)
{
	clock->m =
i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/*
 * Compute VCO and dot clock from CHV dividers; m2 carries a 22-bit
 * fractional part, hence the 64-bit math and the n << 22 divisor.
 */
static void chv_clock(int refclk, intel_clock_t *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
}

/* Reject a candidate divisor set; the debug message is compiled out. */
#define INTELPllInvalid(s)   do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const intel_limit_t *limit,
			       const intel_clock_t *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 > m2 is a hardware requirement except on PNV/VLV-family. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV limit tables do not define p and m ranges. */
	if (!IS_VALLEYVIEW(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single
range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

/*
 * Brute-force i9xx divider search, minimizing absolute dot-clock error
 * against @target.  @match_clock, if set, constrains the post divider.
 */
static bool
i9xx_find_best_dpll(const intel_limit_t *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, intel_clock_t *match_clock,
		    intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m1 must stay strictly greater than m2. */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff some candidate improved on the initial error of target. */
	return (err != target);
}

/* Same search as i9xx_find_best_dpll() but using Pineview clock math. */
static bool
pnv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int err = target;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pineview_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* True iff some candidate improved on the initial error of target. */
	return (err != target);
}

/*
 * G4X divider search: accepts roughly target * 0.585% of dot-clock error
 * and, per hardware guidance, prefers small n and large m1/m2/p1.
 */
static bool
g4x_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	int max_n;
	bool found;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);
	found = false;

	if (intel_pipe_will_have_type(crtc_state,
INTEL_OUTPUT_LVDS)) {
		if (intel_is_dual_link_lvds(dev))
			clock.p2 = limit->p2.p2_fast;
		else
			clock.p2 = limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			clock.p2 = limit->p2.p2_slow;
		else
			clock.p2 = limit->p2.p2_fast;
	}

	memset(best_clock, 0, sizeof(*best_clock));
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_clock(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* Shrink the n search space;
						 * larger n can only do worse. */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check if the calculated PLL configuration is more optimal compared to the
 * best configuration and error found so far. Return the calculated error.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const intel_clock_t *calculated_clock,
			       const intel_clock_t *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}

/* VLV divider search over the fast (5x) clock, minimizing ppm error. */
static bool
vlv_find_best_dpll(const intel_limit_t *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, intel_clock_t *match_clock,
		   intel_clock_t *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ?
2 : 1) { 863 clock.p = clock.p1 * clock.p2; 864 /* based on hardware requirement, prefer bigger m1,m2 values */ 865 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { 866 unsigned int ppm; 867 868 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, 869 refclk * clock.m1); 870 871 vlv_clock(refclk, &clock); 872 873 if (!intel_PLL_is_valid(dev, limit, 874 &clock)) 875 continue; 876 877 if (!vlv_PLL_is_optimal(dev, target, 878 &clock, 879 best_clock, 880 bestppm, &ppm)) 881 continue; 882 883 *best_clock = clock; 884 bestppm = ppm; 885 found = true; 886 } 887 } 888 } 889 } 890 891 return found; 892 } 893 894 static bool 895 chv_find_best_dpll(const intel_limit_t *limit, 896 struct intel_crtc_state *crtc_state, 897 int target, int refclk, intel_clock_t *match_clock, 898 intel_clock_t *best_clock) 899 { 900 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 901 struct drm_device *dev = crtc->base.dev; 902 unsigned int best_error_ppm; 903 intel_clock_t clock; 904 uint64_t m2; 905 int found = false; 906 907 memset(best_clock, 0, sizeof(*best_clock)); 908 best_error_ppm = 1000000; 909 910 /* 911 * Based on hardware doc, the n always set to 1, and m1 always 912 * set to 2. If requires to support 200Mhz refclk, we need to 913 * revisit this because n may not 1 anymore. 914 */ 915 clock.n = 1, clock.m1 = 2; 916 target *= 5; /* fast clock */ 917 918 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { 919 for (clock.p2 = limit->p2.p2_fast; 920 clock.p2 >= limit->p2.p2_slow; 921 clock.p2 -= clock.p2 > 10 ? 
2 : 1) { 922 unsigned int error_ppm; 923 924 clock.p = clock.p1 * clock.p2; 925 926 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p * 927 clock.n) << 22, refclk * clock.m1); 928 929 if (m2 > INT_MAX/clock.m1) 930 continue; 931 932 clock.m2 = m2; 933 934 chv_clock(refclk, &clock); 935 936 if (!intel_PLL_is_valid(dev, limit, &clock)) 937 continue; 938 939 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, 940 best_error_ppm, &error_ppm)) 941 continue; 942 943 *best_clock = clock; 944 best_error_ppm = error_ppm; 945 found = true; 946 } 947 } 948 949 return found; 950 } 951 952 bool intel_crtc_active(struct drm_crtc *crtc) 953 { 954 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 955 956 /* Be paranoid as we can arrive here with only partial 957 * state retrieved from the hardware during setup. 958 * 959 * We can ditch the adjusted_mode.crtc_clock check as soon 960 * as Haswell has gained clock readout/fastboot support. 961 * 962 * We can ditch the crtc->primary->fb check as soon as we can 963 * properly reconstruct framebuffers. 964 * 965 * FIXME: The intel_crtc->active here should be switched to 966 * crtc->state->active once we have proper CRTC states wired up 967 * for atomic. 
 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

/* Return the CPU transcoder currently configured for @pipe's CRTC. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum i915_pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

/*
 * Sample PIPEDSL twice, 5 ms apart; an unchanged display line value means
 * the pipe has stopped scanning out.
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	mdelay(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame.
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		int reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/*
 * ibx_digital_port_connected - is the specified port connected?
 * @dev_priv: i915 private structure
 * @port: the port to test
 *
 * Returns true if @port is connected, false otherwise.
 */
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
				struct intel_digital_port *port)
{
	u32 bit;

	if (HAS_PCH_IBX(dev_priv->dev)) {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG;
			break;
		default:
			/* Ports without a hotplug bit report as connected. */
			return true;
		}
	} else {
		switch (port->port) {
		case PORT_B:
			bit = SDE_PORTB_HOTPLUG_CPT;
			break;
		case PORT_C:
			bit = SDE_PORTC_HOTPLUG_CPT;
			break;
		case PORT_D:
			bit = SDE_PORTD_HOTPLUG_CPT;
			break;
		default:
			/* Ports without a hotplug bit report as connected. */
			return true;
		}
	}

	return I915_READ(SDEISR) & bit;
}

/* Human-readable form of an enable bit, for assertion messages. */
static const char *state_string(bool enabled)
{
	return enabled ?
"on" : "off"; 1086 } 1087 1088 /* Only for pre-ILK configs */ 1089 void assert_pll(struct drm_i915_private *dev_priv, 1090 enum i915_pipe pipe, bool state) 1091 { 1092 int reg; 1093 u32 val; 1094 bool cur_state; 1095 1096 reg = DPLL(pipe); 1097 val = I915_READ(reg); 1098 cur_state = !!(val & DPLL_VCO_ENABLE); 1099 I915_STATE_WARN(cur_state != state, 1100 "PLL state assertion failure (expected %s, current %s)\n", 1101 state_string(state), state_string(cur_state)); 1102 } 1103 1104 /* XXX: the dsi pll is shared between MIPI DSI ports */ 1105 static void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state) 1106 { 1107 u32 val; 1108 bool cur_state; 1109 1110 mutex_lock(&dev_priv->dpio_lock); 1111 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); 1112 mutex_unlock(&dev_priv->dpio_lock); 1113 1114 cur_state = val & DSI_PLL_VCO_EN; 1115 I915_STATE_WARN(cur_state != state, 1116 "DSI PLL state assertion failure (expected %s, current %s)\n", 1117 state_string(state), state_string(cur_state)); 1118 } 1119 #define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true) 1120 #define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false) 1121 1122 struct intel_shared_dpll * 1123 intel_crtc_to_shared_dpll(struct intel_crtc *crtc) 1124 { 1125 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1126 1127 if (crtc->config->shared_dpll < 0) 1128 return NULL; 1129 1130 return &dev_priv->shared_dplls[crtc->config->shared_dpll]; 1131 } 1132 1133 /* For ILK+ */ 1134 void assert_shared_dpll(struct drm_i915_private *dev_priv, 1135 struct intel_shared_dpll *pll, 1136 bool state) 1137 { 1138 bool cur_state; 1139 struct intel_dpll_hw_state hw_state; 1140 1141 if (WARN (!pll, 1142 "asserting DPLL %s with no DPLL\n", state_string(state))) 1143 return; 1144 1145 cur_state = pll->get_hw_state(dev_priv, pll, &hw_state); 1146 I915_STATE_WARN(cur_state != state, 1147 "%s assertion failure (expected %s, current %s)\n", 1148 pll->name, state_string(state), state_string(cur_state)); 
1149 } 1150 1151 static void assert_fdi_tx(struct drm_i915_private *dev_priv, 1152 enum i915_pipe pipe, bool state) 1153 { 1154 int reg; 1155 u32 val; 1156 bool cur_state; 1157 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 1158 pipe); 1159 1160 if (HAS_DDI(dev_priv->dev)) { 1161 /* DDI does not have a specific FDI_TX register */ 1162 reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); 1163 val = I915_READ(reg); 1164 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); 1165 } else { 1166 reg = FDI_TX_CTL(pipe); 1167 val = I915_READ(reg); 1168 cur_state = !!(val & FDI_TX_ENABLE); 1169 } 1170 I915_STATE_WARN(cur_state != state, 1171 "FDI TX state assertion failure (expected %s, current %s)\n", 1172 state_string(state), state_string(cur_state)); 1173 } 1174 #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) 1175 #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) 1176 1177 static void assert_fdi_rx(struct drm_i915_private *dev_priv, 1178 enum i915_pipe pipe, bool state) 1179 { 1180 int reg; 1181 u32 val; 1182 bool cur_state; 1183 1184 reg = FDI_RX_CTL(pipe); 1185 val = I915_READ(reg); 1186 cur_state = !!(val & FDI_RX_ENABLE); 1187 I915_STATE_WARN(cur_state != state, 1188 "FDI RX state assertion failure (expected %s, current %s)\n", 1189 state_string(state), state_string(cur_state)); 1190 } 1191 #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) 1192 #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) 1193 1194 static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, 1195 enum i915_pipe pipe) 1196 { 1197 int reg; 1198 u32 val; 1199 1200 /* ILK FDI PLL is always enabled */ 1201 if (INTEL_INFO(dev_priv->dev)->gen == 5) 1202 return; 1203 1204 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1205 if (HAS_DDI(dev_priv->dev)) 1206 return; 1207 1208 reg = FDI_TX_CTL(pipe); 1209 val = I915_READ(reg); 1210 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be 
active but is disabled\n"); 1211 } 1212 1213 void assert_fdi_rx_pll(struct drm_i915_private *dev_priv, 1214 enum i915_pipe pipe, bool state) 1215 { 1216 int reg; 1217 u32 val; 1218 bool cur_state; 1219 1220 reg = FDI_RX_CTL(pipe); 1221 val = I915_READ(reg); 1222 cur_state = !!(val & FDI_RX_PLL_ENABLE); 1223 I915_STATE_WARN(cur_state != state, 1224 "FDI RX PLL assertion failure (expected %s, current %s)\n", 1225 state_string(state), state_string(cur_state)); 1226 } 1227 1228 void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1229 enum i915_pipe pipe) 1230 { 1231 struct drm_device *dev = dev_priv->dev; 1232 int pp_reg; 1233 u32 val; 1234 enum i915_pipe panel_pipe = PIPE_A; 1235 bool locked = true; 1236 1237 if (WARN_ON(HAS_DDI(dev))) 1238 return; 1239 1240 if (HAS_PCH_SPLIT(dev)) { 1241 u32 port_sel; 1242 1243 pp_reg = PCH_PP_CONTROL; 1244 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK; 1245 1246 if (port_sel == PANEL_PORT_SELECT_LVDS && 1247 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) 1248 panel_pipe = PIPE_B; 1249 /* XXX: else fix for eDP */ 1250 } else if (IS_VALLEYVIEW(dev)) { 1251 /* presumably write lock depends on pipe, not port select */ 1252 pp_reg = VLV_PIPE_PP_CONTROL(pipe); 1253 panel_pipe = pipe; 1254 } else { 1255 pp_reg = PP_CONTROL; 1256 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT) 1257 panel_pipe = PIPE_B; 1258 } 1259 1260 val = I915_READ(pp_reg); 1261 if (!(val & PANEL_POWER_ON) || 1262 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS)) 1263 locked = false; 1264 1265 I915_STATE_WARN(panel_pipe == pipe && locked, 1266 "panel assertion failure, pipe %c regs locked\n", 1267 pipe_name(pipe)); 1268 } 1269 1270 static void assert_cursor(struct drm_i915_private *dev_priv, 1271 enum i915_pipe pipe, bool state) 1272 { 1273 struct drm_device *dev = dev_priv->dev; 1274 bool cur_state; 1275 1276 if (IS_845G(dev) || IS_I865G(dev)) 1277 cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1278 else 1279 cur_state = I915_READ(CURCNTR(pipe)) & 
CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

/*
 * Warn if the pipe's actual enabled state (PIPECONF) disagrees with the
 * expected @state.  A pipe in a powered-down power well reads as disabled.
 */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum i915_pipe pipe, bool state)
{
	int reg;
	u32 val;
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	if (!intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
		/* Can't read the register with the power well off. */
		cur_state = false;
	} else {
		reg = PIPECONF(cpu_transcoder);
		val = I915_READ(reg);
		cur_state = !!(val & PIPECONF_ENABLE);
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
	     pipe_name(pipe), state_string(state), state_string(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	int reg;
	u32 val;
	bool cur_state;

	reg = DSPCNTR(plane);
	val = I915_READ(reg);
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
	     plane_name(plane), state_string(state), state_string(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

/*
 * Warn if any primary plane is still feeding @pipe.  On gen4+ planes are
 * fixed to pipes; on older hardware each plane's pipe-select field must
 * be checked.
 */
static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, i;
	u32 val;
	int cur_pipe;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		reg = DSPCNTR(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
		     "plane %c assertion failure, should be disabled but not\n",
		     plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		reg = DSPCNTR(i);
		val = I915_READ(reg);
		cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
		     "plane %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(i), pipe_name(pipe));
	}
}

/*
 * Warn if any sprite/overlay plane on @pipe is still enabled.  The
 * register layout differs per generation (gen9 universal planes, VLV/CHV
 * sprites, gen7 SPRCTL, ILK/SNB DVSCNTR).
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum i915_pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int reg, sprite;
	u32 val;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			reg = SPCNTR(pipe, sprite);
			val = I915_READ(reg);
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		reg = SPRCTL(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		reg = DVSCNTR(pipe);
		val = I915_READ(reg);
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}

/*
 * Warn if vblank interrupts are still enabled on @crtc.  A successful
 * (== 0) vblank_get means they were on; balance it with a put.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

/* Warn if the PCH reference clock (DREF) is not running.  IBX/CPT only. */
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
	u32 val;
	bool enabled;

	I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv->dev) || HAS_PCH_CPT(dev_priv->dev)));

	val = I915_READ(PCH_DREF_CONTROL);
	enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
			    DREF_SUPERSPREAD_SOURCE_MASK));
	I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
}

static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
					   enum i915_pipe pipe)
{
	int reg;
	u32 val;
	bool enabled;

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}

/*
 * Is this DP port enabled and routed to @pipe?  Port-to-pipe routing is
 * encoded differently on CPT (transcoder DP control), CHV and older parts.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum i915_pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
		u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

/* Is this HDMI/SDVO port enabled and routed to @pipe? */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv->dev)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

/* Is the LVDS port enabled and routed to @pipe? */
static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

/* Is the VGA/CRT DAC enabled and routed to @pipe? */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum i915_pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv->dev)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum i915_pipe pipe, int reg, u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* IBX quirk: a disabled port must not be left selecting pipe B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum i915_pipe pipe, int reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
	     "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
	     reg, pipe_name(pipe));

	/* IBX quirk: a disabled port must not be left selecting pipe B. */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_ENABLE) == 0
	     && (val & SDVO_PIPE_B_SELECT),
	     "IBX PCH hdmi port still using transcoder B\n");
}

/* Warn if any PCH output port (DP, VGA, LVDS, HDMI) still drives @pipe. */
static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum i915_pipe pipe)
{
	int reg;
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	reg = PCH_ADPA;
	val = I915_READ(reg);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
	     "PCH VGA enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	reg = PCH_LVDS;
	val = I915_READ(reg);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
	     "PCH LVDS enabled on transcoder %c, should be disabled\n",
	     pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

/* Map DPIO PHYs to their IOSF sideband ports on VLV/CHV. */
static void intel_init_dpio(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_VALLEYVIEW(dev))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

/*
 * Enable the DPLL for @crtc on Valleyview, waiting for lock and warming
 * the PLL up with repeated writes.  Pipe must be disabled on entry.
 */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev_priv->dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150);

	if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);

	I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(crtc->pipe));

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/*
 * Enable the DPLL for @crtc on Cherryview via the DPIO sideband:
 * first ungate the 10-bit clock to the display controller, then enable
 * the PLL and wait for lock.  Pipe must be disabled on entry.
 */
static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	BUG_ON(!IS_CHERRYVIEW(dev_priv->dev));

	mutex_lock(&dev_priv->dpio_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);

	/* not sure when this should be written */
	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Count how many active crtcs are driving a DVO output. */
static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

/*
 * Enable the DPLL for @crtc on gen2-4 hardware, including the i830 DVO
 * 2x-clock dance and the post-enable warmup writes.  Pre-ILK only.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* No really, not for ILK+ */
	BUG_ON(INTEL_INFO(dev)->gen >= 5);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneosly.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @dev_priv: i915 private structure
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe, making sure the pipe is off first.
 *
 * Note!  This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    intel_num_dvo_pipes(dev) == 1) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), 0);
	POSTING_READ(DPLL(pipe));
}

/* Disable the VLV DPLL for @pipe; pipe must already be off. */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	u32 val = 0;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/*
	 * Leave integrated clock source and reference clock enabled for pipe B.
	 * The latter is needed for VGA hotplug / manual detection.
	 */
	if (pipe == PIPE_B)
		val = DPLL_INTEGRATED_CRI_CLK_VLV | DPLL_REFA_CLK_ENABLE_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

}

/*
 * Disable the CHV DPLL for @pipe, then gate the 10-bit display clock and
 * the left/right clock distribution via DPIO.  Pipe must already be off.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum i915_pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Set PLL en = 0 */
	val = DPLL_SSC_REF_CLOCK_CHV | DPLL_REFA_CLK_ENABLE_VLV;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;
	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->dpio_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	/* disable left/right clock distribution */
	if (pipe != PIPE_B) {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
		val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
	} else {
		val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
		val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
		vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Wait for a VLV/CHV digital port's PHY to report ready in the
 * corresponding DPLL (or PHY status) register.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport)
{
	u32 port_mask;
	int dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
break; 1828 case PORT_C: 1829 port_mask = DPLL_PORTC_READY_MASK; 1830 dpll_reg = DPLL(0); 1831 break; 1832 case PORT_D: 1833 port_mask = DPLL_PORTD_READY_MASK; 1834 dpll_reg = DPIO_PHY_STATUS; 1835 break; 1836 default: 1837 BUG(); 1838 } 1839 1840 if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000)) 1841 WARN(1, "timed out waiting for port %c ready: 0x%08x\n", 1842 port_name(dport->port), I915_READ(dpll_reg)); 1843 } 1844 1845 static void intel_prepare_shared_dpll(struct intel_crtc *crtc) 1846 { 1847 struct drm_device *dev = crtc->base.dev; 1848 struct drm_i915_private *dev_priv = dev->dev_private; 1849 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1850 1851 if (WARN_ON(pll == NULL)) 1852 return; 1853 1854 WARN_ON(!pll->config.crtc_mask); 1855 if (pll->active == 0) { 1856 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1857 WARN_ON(pll->on); 1858 assert_shared_dpll_disabled(dev_priv, pll); 1859 1860 pll->mode_set(dev_priv, pll); 1861 } 1862 } 1863 1864 /** 1865 * intel_enable_shared_dpll - enable PCH PLL 1866 * @dev_priv: i915 private structure 1867 * @pipe: pipe PLL to enable 1868 * 1869 * The PCH PLL needs to be enabled before the PCH transcoder, since it 1870 * drives the transcoder clock. 1871 */ 1872 static void intel_enable_shared_dpll(struct intel_crtc *crtc) 1873 { 1874 struct drm_device *dev = crtc->base.dev; 1875 struct drm_i915_private *dev_priv = dev->dev_private; 1876 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1877 1878 if (WARN_ON(pll == NULL)) 1879 return; 1880 1881 if (WARN_ON(pll->config.crtc_mask == 0)) 1882 return; 1883 1884 DRM_DEBUG_KMS("enable %s (active %d, on? 
%d) for crtc %d\n", 1885 pll->name, pll->active, pll->on, 1886 crtc->base.base.id); 1887 1888 if (pll->active++) { 1889 WARN_ON(!pll->on); 1890 assert_shared_dpll_enabled(dev_priv, pll); 1891 return; 1892 } 1893 WARN_ON(pll->on); 1894 1895 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 1896 1897 DRM_DEBUG_KMS("enabling %s\n", pll->name); 1898 pll->enable(dev_priv, pll); 1899 pll->on = true; 1900 } 1901 1902 static void intel_disable_shared_dpll(struct intel_crtc *crtc) 1903 { 1904 struct drm_device *dev = crtc->base.dev; 1905 struct drm_i915_private *dev_priv = dev->dev_private; 1906 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 1907 1908 /* PCH only available on ILK+ */ 1909 BUG_ON(INTEL_INFO(dev)->gen < 5); 1910 if (WARN_ON(pll == NULL)) 1911 return; 1912 1913 if (WARN_ON(pll->config.crtc_mask == 0)) 1914 return; 1915 1916 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1917 pll->name, pll->active, pll->on, 1918 crtc->base.base.id); 1919 1920 if (WARN_ON(pll->active == 0)) { 1921 assert_shared_dpll_disabled(dev_priv, pll); 1922 return; 1923 } 1924 1925 assert_shared_dpll_enabled(dev_priv, pll); 1926 WARN_ON(!pll->on); 1927 if (--pll->active) 1928 return; 1929 1930 DRM_DEBUG_KMS("disabling %s\n", pll->name); 1931 pll->disable(dev_priv, pll); 1932 pll->on = false; 1933 1934 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 1935 } 1936 1937 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1938 enum i915_pipe pipe) 1939 { 1940 struct drm_device *dev = dev_priv->dev; 1941 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1942 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1943 uint32_t reg, val, pipeconf_val; 1944 1945 /* PCH only available on ILK+ */ 1946 BUG_ON(!HAS_PCH_SPLIT(dev)); 1947 1948 /* Make sure PCH DPLL is enabled */ 1949 assert_shared_dpll_enabled(dev_priv, 1950 intel_crtc_to_shared_dpll(intel_crtc)); 1951 1952 /* FDI must be feeding us bits for PCH ports */ 1953 
assert_fdi_tx_enabled(dev_priv, pipe); 1954 assert_fdi_rx_enabled(dev_priv, pipe); 1955 1956 if (HAS_PCH_CPT(dev)) { 1957 /* Workaround: Set the timing override bit before enabling the 1958 * pch transcoder. */ 1959 reg = TRANS_CHICKEN2(pipe); 1960 val = I915_READ(reg); 1961 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 1962 I915_WRITE(reg, val); 1963 } 1964 1965 reg = PCH_TRANSCONF(pipe); 1966 val = I915_READ(reg); 1967 pipeconf_val = I915_READ(PIPECONF(pipe)); 1968 1969 if (HAS_PCH_IBX(dev_priv->dev)) { 1970 /* 1971 * make the BPC in transcoder be consistent with 1972 * that in pipeconf reg. 1973 */ 1974 val &= ~PIPECONF_BPC_MASK; 1975 val |= pipeconf_val & PIPECONF_BPC_MASK; 1976 } 1977 1978 val &= ~TRANS_INTERLACE_MASK; 1979 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1980 if (HAS_PCH_IBX(dev_priv->dev) && 1981 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 1982 val |= TRANS_LEGACY_INTERLACED_ILK; 1983 else 1984 val |= TRANS_INTERLACED; 1985 else 1986 val |= TRANS_PROGRESSIVE; 1987 1988 I915_WRITE(reg, val | TRANS_ENABLE); 1989 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1990 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1991 } 1992 1993 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1994 enum transcoder cpu_transcoder) 1995 { 1996 u32 val, pipeconf_val; 1997 1998 /* PCH only available on ILK+ */ 1999 BUG_ON(!HAS_PCH_SPLIT(dev_priv->dev)); 2000 2001 /* FDI must be feeding us bits for PCH ports */ 2002 assert_fdi_tx_enabled(dev_priv, (enum i915_pipe) cpu_transcoder); 2003 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2004 2005 /* Workaround: set timing override bit. 
*/ 2006 val = I915_READ(_TRANSA_CHICKEN2); 2007 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2008 I915_WRITE(_TRANSA_CHICKEN2, val); 2009 2010 val = TRANS_ENABLE; 2011 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); 2012 2013 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == 2014 PIPECONF_INTERLACED_ILK) 2015 val |= TRANS_INTERLACED; 2016 else 2017 val |= TRANS_PROGRESSIVE; 2018 2019 I915_WRITE(LPT_TRANSCONF, val); 2020 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 2021 DRM_ERROR("Failed to enable PCH transcoder\n"); 2022 } 2023 2024 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 2025 enum i915_pipe pipe) 2026 { 2027 struct drm_device *dev = dev_priv->dev; 2028 uint32_t reg, val; 2029 2030 /* FDI relies on the transcoder */ 2031 assert_fdi_tx_disabled(dev_priv, pipe); 2032 assert_fdi_rx_disabled(dev_priv, pipe); 2033 2034 /* Ports must be off as well */ 2035 assert_pch_ports_disabled(dev_priv, pipe); 2036 2037 reg = PCH_TRANSCONF(pipe); 2038 val = I915_READ(reg); 2039 val &= ~TRANS_ENABLE; 2040 I915_WRITE(reg, val); 2041 /* wait for PCH transcoder off, transcoder state */ 2042 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 2043 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 2044 2045 if (!HAS_PCH_IBX(dev)) { 2046 /* Workaround: Clear the timing override chicken bit again. */ 2047 reg = TRANS_CHICKEN2(pipe); 2048 val = I915_READ(reg); 2049 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2050 I915_WRITE(reg, val); 2051 } 2052 } 2053 2054 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv) 2055 { 2056 u32 val; 2057 2058 val = I915_READ(LPT_TRANSCONF); 2059 val &= ~TRANS_ENABLE; 2060 I915_WRITE(LPT_TRANSCONF, val); 2061 /* wait for PCH transcoder off, transcoder state */ 2062 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 2063 DRM_ERROR("Failed to disable PCH transcoder\n"); 2064 2065 /* Workaround: clear timing override bit. 
*/ 2066 val = I915_READ(_TRANSA_CHICKEN2); 2067 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2068 I915_WRITE(_TRANSA_CHICKEN2, val); 2069 } 2070 2071 /** 2072 * intel_enable_pipe - enable a pipe, asserting requirements 2073 * @crtc: crtc responsible for the pipe 2074 * 2075 * Enable @crtc's pipe, making sure that various hardware specific requirements 2076 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. 2077 */ 2078 static void intel_enable_pipe(struct intel_crtc *crtc) 2079 { 2080 struct drm_device *dev = crtc->base.dev; 2081 struct drm_i915_private *dev_priv = dev->dev_private; 2082 enum i915_pipe pipe = crtc->pipe; 2083 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, 2084 pipe); 2085 enum i915_pipe pch_transcoder; 2086 int reg; 2087 u32 val; 2088 2089 assert_planes_disabled(dev_priv, pipe); 2090 assert_cursor_disabled(dev_priv, pipe); 2091 assert_sprites_disabled(dev_priv, pipe); 2092 2093 if (HAS_PCH_LPT(dev_priv->dev)) 2094 pch_transcoder = TRANSCODER_A; 2095 else 2096 pch_transcoder = pipe; 2097 2098 /* 2099 * A pipe without a PLL won't actually be able to drive bits from 2100 * a plane. On ILK+ the pipe PLLs are integrated, so we don't 2101 * need the check. 
	 */
	if (!HAS_PCH_SPLIT(dev_priv->dev))
		if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum i915_pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already enabled: only expected when a quirk forces the
		 * pipe to stay on. */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);
}

/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipes is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum i915_pipe pipe = crtc->pipe;
	int reg;
	u32 val;

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for pipe-off if we actually cleared the enable bit. */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}

/*
 * Plane regs are double buffered, going from enabled->disabled needs a
 * trigger in order to latch. The display address reg provides this.
 */
void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
			       enum plane plane)
{
	struct drm_device *dev = dev_priv->dev;
	u32 reg = INTEL_INFO(dev)->gen >= 4 ? DSPSURF(plane) : DSPADDR(plane);

	/* Write the register back to itself to arm the latch. */
	I915_WRITE(reg, I915_READ(reg));
	POSTING_READ(reg);
}

/**
 * intel_enable_primary_hw_plane - enable the primary plane on a given pipe
 * @plane: plane to be enabled
 * @crtc: crtc for the plane
 *
 * Enable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_enable_primary_hw_plane(struct drm_plane *plane,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* If the pipe isn't enabled, we can't pump pixels and may hang */
	assert_pipe_enabled(dev_priv, intel_crtc->pipe);

	if (intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = true;

	/* update_primary_plane() programs the enable state from
	 * primary_enabled, which was just set above. */
	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */
	if (IS_BROADWELL(dev))
		intel_wait_for_vblank(dev, intel_crtc->pipe);
}

/**
 * intel_disable_primary_hw_plane - disable the primary hardware plane
 * @plane: plane to be disabled
 * @crtc: crtc for the plane
 *
 * Disable @plane on @crtc, making sure that the pipe is running first.
 */
static void intel_disable_primary_hw_plane(struct drm_plane *plane,
					   struct drm_crtc *crtc)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (WARN_ON(!intel_crtc->active))
		return;

	if (!intel_crtc->primary_enabled)
		return;

	intel_crtc->primary_enabled = false;

	/* With primary_enabled now false, update_primary_plane() writes the
	 * plane disable values. */
	dev_priv->display.update_primary_plane(crtc, plane->fb,
					       crtc->x, crtc->y);
}

/* Returns true when the VT-d scanout workaround is needed: gen6+ with the
 * graphics IOMMU mapped (only when CONFIG_INTEL_IOMMU is built in). */
static bool need_vtd_wa(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

/* Height in scanlines of one tile row for the given pixel format and
 * fb modifier (1 for linear). */
unsigned int
intel_tile_height(struct drm_device *dev, uint32_t pixel_format,
		  uint64_t fb_format_modifier)
{
	unsigned int tile_height;
	uint32_t pixel_bytes;

	switch (fb_format_modifier) {
	case DRM_FORMAT_MOD_NONE:
		tile_height = 1;
		break;
	case I915_FORMAT_MOD_X_TILED:
		tile_height = IS_GEN2(dev) ?
				16 : 8;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		tile_height = 32;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile height varies with the pixel size. */
		pixel_bytes = drm_format_plane_cpp(pixel_format, 0);
		switch (pixel_bytes) {
		default:
		case 1:
			tile_height = 64;
			break;
		case 2:
		case 4:
			tile_height = 32;
			break;
		case 8:
			tile_height = 16;
			break;
		case 16:
			WARN_ONCE(1,
				  "128-bit pixels are not supported for display!");
			tile_height = 16;
			break;
		}
		break;
	default:
		MISSING_CASE(fb_format_modifier);
		tile_height = 1;
		break;
	}

	return tile_height;
}

/* Round @height up to a whole number of tile rows for the given format
 * and fb modifier. */
unsigned int
intel_fb_align_height(struct drm_device *dev, unsigned int height,
		      uint32_t pixel_format, uint64_t fb_format_modifier)
{
	return ALIGN(height, intel_tile_height(dev, pixel_format,
					       fb_format_modifier));
}

/* Fill @view for mapping @fb into the GGTT: the normal view unless the
 * plane state requests 90/270 rotation, in which case the rotated view is
 * used (and Y/Yf tiling is required). Returns 0 or -EINVAL. */
static int
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
			const struct drm_plane_state *plane_state)
{
	struct intel_rotation_info *info = &view->rotation_info;

	*view = i915_ggtt_view_normal;

	if (!plane_state)
		return 0;

	if (!intel_rotation_90_or_270(plane_state->rotation))
		return 0;

	*view = i915_ggtt_view_rotated;

	info->height = fb->height;
	info->pixel_format = fb->pixel_format;
	info->pitch = fb->pitches[0];
	info->fb_modifier = fb->modifier[0];

	if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED ||
	      info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) {
		DRM_DEBUG_KMS(
			      "Y or Yf tiling is needed for 90/270 rotation!\n");
		return -EINVAL;
	}

	return 0;
}

/* Pin @fb's backing object into the GGTT for scanout and install a fence.
 * Must be called with struct_mutex held; undone by intel_unpin_fb_obj().
 * Returns 0 on success or a negative error code. */
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
			   struct drm_framebuffer *fb,
			   const struct drm_plane_state *plane_state,
			   struct intel_engine_cs *pipelined)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	/* Minimum GGTT alignment required by the display engine, per
	 * tiling mode and generation. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
			alignment = 128 * 1024;
		else if (INTEL_INFO(dev)->gen >= 4)
			alignment = 4 * 1024;
		else
			alignment = 64 * 1024;
		break;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen >= 9)
			alignment = 256 * 1024;
		else {
			/* pin() will align the object as required by fence */
			alignment = 0;
		}
		break;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (WARN_ONCE(INTEL_INFO(dev)->gen < 9,
			      "Y tiling bo slipped through, driver bug!\n"))
			return -EINVAL;
		alignment = 1 * 1024 * 1024;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
		return -EINVAL;
	}

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	if (ret)
		return ret;

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
						   &view);
	if (ret)
		goto err_interruptible;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	ret = i915_gem_object_get_fence(obj);
	if (ret)
		goto err_unpin;

	i915_gem_object_pin_fence(obj);

	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
	dev_priv->mm.interruptible = true;
	intel_runtime_pm_put(dev_priv);
	return ret;
}

/* Release the fence and display-plane pinning taken by
 * intel_pin_and_fence_fb_obj(). Caller must hold struct_mutex. */
static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
			       const struct drm_plane_state *plane_state)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	int ret;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
	WARN_ONCE(ret, "Couldn't get view from plane state!");

	i915_gem_object_unpin_fence(obj);
	i915_gem_object_unpin_from_display_plane(obj, &view);
}

/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
 * is assumed to be a power-of-two.
 */
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
					     unsigned int tiling_mode,
					     unsigned int cpp,
					     unsigned int pitch)
{
	if (tiling_mode != I915_TILING_NONE) {
		unsigned int tile_rows, tiles;

		/* Tiled: treat the surface as 8-row, 512-byte-wide (4096
		 * byte) tiles and return the offset of the containing tile;
		 * the remainder is folded back into *x / *y. */
		tile_rows = *y / 8;
		*y %= 8;

		tiles = *x / (512/cpp);
		*x %= 512/cpp;

		return tile_rows * pitch * 8 + tiles * 4096;
	} else {
		unsigned int offset;

		/* Linear: round the byte offset down to a 4k page and fold
		 * the remainder into *x (so *y becomes 0). */
		offset = *y * pitch + *x * cpp;
		*y = 0;
		*x = (offset & 4095) / cpp;
		return offset & -4096;
	}
}

/* Translate a DSPCNTR pixel format field into its fourcc; unrecognized
 * values fall through to XRGB8888. */
static int i9xx_format_to_fourcc(int format)
{
	switch (format) {
	case DISPPLANE_8BPP:
		return DRM_FORMAT_C8;
	case DISPPLANE_BGRX555:
		return DRM_FORMAT_XRGB1555;
	case DISPPLANE_BGRX565:
		return DRM_FORMAT_RGB565;
	default:
	case DISPPLANE_BGRX888:
		return DRM_FORMAT_XRGB8888;
	case DISPPLANE_RGBX888:
		return DRM_FORMAT_XBGR8888;
	case DISPPLANE_BGRX101010:
		return DRM_FORMAT_XRGB2101010;
	case DISPPLANE_RGBX101010:
		return DRM_FORMAT_XBGR2101010;
	}
}

/* Translate a skylake PLANE_CTL format field (plus RGB-order and alpha
 * flags) into its fourcc; unrecognized formats fall through to the
 * 8888 family. */
static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
{
	switch (format) {
	case PLANE_CTL_FORMAT_RGB_565:
		return DRM_FORMAT_RGB565;
	default:
	case PLANE_CTL_FORMAT_XRGB_8888:
		if (rgb_order) {
			if (alpha)
				return DRM_FORMAT_ABGR8888;
			else
				return DRM_FORMAT_XBGR8888;
		} else {
			if (alpha)
				return DRM_FORMAT_ARGB8888;
			else
				return DRM_FORMAT_XRGB8888;
		}
	case PLANE_CTL_FORMAT_XRGB_2101010:
		if (rgb_order)
			return DRM_FORMAT_XBGR2101010;
		else
			return DRM_FORMAT_XRGB2101010;
	}
}

/* Wrap the firmware-programmed scanout memory (in stolen memory) in a GEM
 * object and initialize the intel framebuffer around it, so the boot image
 * can be inherited. Returns false if the object or fb can't be created. */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base = plane_config->base;

	if (plane_config->size == 0)
		return false;

	obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base,
							     plane_config->size);
	if (!obj)
		return false;

	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	mutex_lock(&dev->struct_mutex);
	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}
	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}

/* Update plane->state->fb to match plane->fb after driver-internal updates */
static void
update_state_fb(struct drm_plane *plane)
{
	if (plane->fb == plane->state->fb)
		return;

	if (plane->state->fb)
		drm_framebuffer_unreference(plane->state->fb);
	plane->state->fb = plane->fb;
	if (plane->state->fb)
		drm_framebuffer_reference(plane->state->fb);
}

/* Attach the fb inherited from the BIOS to the crtc's primary plane; if
 * wrapping it failed, look for another active CRTC already scanning out
 * from the same GGTT address and share its fb instead. */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Share the fb when it scans out from the same base address. */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	return;

valid_fb:
	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	primary->fb = fb;
	primary->state->crtc = &intel_crtc->base;
	primary->crtc = &intel_crtc->base;
	update_state_fb(primary);
	obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
}

/* Program the pre-ILK (gen2-4, VLV/CHV) primary plane registers for @fb
 * panning to (@x, @y), or write the disable values when
 * intel_crtc->primary_enabled is false. */
static void i9xx_update_primary_plane(struct drm_crtc *crtc,
				      struct drm_framebuffer *fb,
				      int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(reg, 0);
		if (INTEL_INFO(dev)->gen >= 4)
			I915_WRITE(DSPSURF(plane), 0);
		else
			I915_WRITE(DSPADDR(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		I915_WRITE(PRIMSIZE(plane),
			   ((intel_crtc->config->pipe_src_h - 1) << 16) |
			   (intel_crtc->config->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Map the fb's fourcc to the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a page-aligned base; fold the
		 * remainder into the tile offset / linear offset regs. */
		intel_crtc->dspaddr_offset =
			intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
						       pixel_size,
						       fb->pitches[0]);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (intel_crtc->config->pipe_src_w - 1);
		y += (intel_crtc->config->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		data and adding to linear_offset*/
		linear_offset +=
			(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
			(intel_crtc->config->pipe_src_w - 1) * pixel_size;
	}

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}

/* Program the ILK+ (pre-skylake) primary plane registers for @fb panning
 * to (@x, @y), or write the disable values when
 * intel_crtc->primary_enabled is false. */
static void ironlake_update_primary_plane(struct drm_crtc *crtc,
					  struct drm_framebuffer *fb,
					  int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int plane = intel_crtc->plane;
	unsigned long linear_offset;
	u32 dspcntr;
	u32 reg = DSPCNTR(plane);
	int pixel_size;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(reg, 0);
		I915_WRITE(DSPSURF(plane), 0);
		POSTING_READ(reg);
		return;
	}

	obj = intel_fb_obj(fb);
	if (WARN_ON(obj == NULL))
		return;

	pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Map the fb's fourcc to the DSPCNTR format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |=
			   DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * pixel_size;
	/* Scanout base must be page aligned; fold the remainder into the
	 * tile offset / linear offset registers. */
	intel_crtc->dspaddr_offset =
		intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
					       pixel_size,
					       fb->pitches[0]);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (intel_crtc->config->pipe_src_w - 1);
			y += (intel_crtc->config->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			data and adding to linear_offset*/
			linear_offset +=
				(intel_crtc->config->pipe_src_h - 1) * fb->pitches[0] +
				(intel_crtc->config->pipe_src_w - 1) * pixel_size;
		}
	}

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}

/* Returns the stride alignment unit (in bytes) for the given fb modifier:
 * 64-byte chunks for linear buffers, one tile width for tiled ones. */
u32
intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier,
			  uint32_t pixel_format)
{
	u32 bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8;

	/*
	 * The stride is either expressed as a multiple of 64 bytes
	 * chunks for linear buffers or in number of tiles for tiled
	 * buffers.
	 */
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return 64;
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev)->gen == 2)
			return 128;
		return 512;
	case I915_FORMAT_MOD_Y_TILED:
		/* No need to check for old gens and Y tiling since this is
		 * about the display engine and those will be blocked before
		 * we get here.
		 */
		return 128;
	case I915_FORMAT_MOD_Yf_TILED:
		if (bits_per_pixel == 8)
			return 64;
		else
			return 128;
	default:
		MISSING_CASE(fb_modifier);
		return 64;
	}
}

/* GGTT offset of @obj for scanout, using the rotated view when the plane
 * state requests 90/270 rotation. */
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
				     struct drm_i915_gem_object *obj)
{
	const struct i915_ggtt_view *view = &i915_ggtt_view_normal;

	if (intel_rotation_90_or_270(intel_plane->base.state->rotation))
		view = &i915_ggtt_view_rotated;

	return i915_gem_obj_ggtt_offset_view(obj, view);
}

/* Program the skylake universal plane 0 (primary) registers for @fb
 * panning to (@x, @y), or write the disable values when
 * intel_crtc->primary_enabled is false. */
static void skylake_update_primary_plane(struct drm_crtc *crtc,
					 struct drm_framebuffer *fb,
					 int x, int y)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div;
	unsigned long surf_addr;

	if (!intel_crtc->primary_enabled) {
		I915_WRITE(PLANE_CTL(pipe, 0), 0);
		I915_WRITE(PLANE_SURF(pipe, 0), 0);
		POSTING_READ(PLANE_CTL(pipe, 0));
		return;
	}

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	/* Map the fb's fourcc to the PLANE_CTL format/order/alpha fields. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
		plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
		break;
	case DRM_FORMAT_XRGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ARGB8888:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XBGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		break;
	case DRM_FORMAT_ABGR8888:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
		plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
		break;
	case DRM_FORMAT_XRGB2101010:
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		plane_ctl |= PLANE_CTL_ORDER_RGBX;
		plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
		break;
	default:
		BUG();
	}

	/* Map the fb modifier to the PLANE_CTL tiling field. */
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		plane_ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		plane_ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		plane_ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	if (crtc->primary->state->rotation == BIT(DRM_ROTATE_180))
		plane_ctl |= PLANE_CTL_ROTATE_180;

	obj = intel_fb_obj(fb);
	stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj);

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_POS(pipe, 0), 0);
	I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
	I915_WRITE(PLANE_SIZE(pipe, 0),
		   (intel_crtc->config->pipe_src_h - 1) << 16 |
		   (intel_crtc->config->pipe_src_w - 1));
	I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div);
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}

/* Assume fb object is pinned & idle & fenced and just update base pointers */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Turn FBC off before the scanout base changes. */
	if (dev_priv->display.disable_fbc)
		dev_priv->display.disable_fbc(dev);

	dev_priv->display.update_primary_plane(crtc, fb, x, y);

	return 0;
}

/* Complete every pending page flip on every crtc so that waiters get
 * their events (used after a GPU reset has nuked the in-flight flips). */
static void intel_complete_page_flips(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		enum plane plane = intel_crtc->plane;

		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip_plane(dev, plane);
	}
}

/* Rewrite the primary plane registers of every active crtc from its
 * current fb (used to restore scanout after a GPU reset). */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		drm_modeset_lock(&crtc->mutex, NULL);
		/*
		 * FIXME: Once we have proper support for primary planes (and
		 * disabling them without disabling the entire crtc) allow again
		 * a NULL crtc->primary->fb.
		 */
		if (intel_crtc->active && crtc->primary->fb)
			dev_priv->display.update_primary_plane(crtc,
							       crtc->primary->fb,
							       crtc->x,
							       crtc->y);
		drm_modeset_unlock(&crtc->mutex);
	}
}

/* Quiesce the display before a GPU reset. Only acts on platforms where
 * the reset touches the display (gen3/4 except G4X); there it takes all
 * modeset locks (released by intel_finish_reset()) and disables every
 * active crtc. */
void intel_prepare_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc;

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		return;

	drm_modeset_lock_all(dev);

	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (crtc->active)
			dev_priv->display.crtc_disable(&crtc->base);
	}
}

/* Restore the display after a GPU reset; counterpart of
 * intel_prepare_reset(). Completes pending flips, then either refreshes
 * the primary planes (platforms where reset leaves the display alone) or
 * fully re-initializes the display hardware and drops the modeset locks. */
void intel_finish_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev);

	/* no reset support for gen2 */
	if (IS_GEN2(dev))
		return;

	/* reset doesn't touch the display */
	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the the last fb to make sure we're
		 * showing the correct fb after a reset.
		 */
		intel_update_primary_planes(dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_modeset_setup_hw_state(dev, true);

	intel_hpd_init(dev_priv);

	/* Pairs with drm_modeset_lock_all() in intel_prepare_reset(). */
	drm_modeset_unlock_all(dev);
}

/* Wait for outstanding GPU rendering on @old_fb's object to finish before
 * the fb is unpinned. Returns the wait result; a failure implies a hung
 * GPU and can be safely ignored by the caller. */
static int
intel_finish_fb(struct drm_framebuffer *old_fb)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(old_fb);
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	bool was_interruptible = dev_priv->mm.interruptible;
	int ret;

	/* Big Hammer, we also need to ensure that any pending
	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
	 * current scanout is retired before unpinning the old
	 * framebuffer.
	 *
	 * This should only fail upon a hung GPU, in which case we
	 * can safely continue.
	 */
	dev_priv->mm.interruptible = false;
	ret = i915_gem_object_finish_gpu(obj);
	dev_priv->mm.interruptible = was_interruptible;

	return ret;
}

/* Returns true when a page flip is still pending on @crtc and has not
 * been invalidated by a GPU reset. */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool pending;

	/* A reset in progress, or one completed since the flip was queued,
	 * nukes the pending flip. */
	if (i915_reset_in_progress(&dev_priv->gpu_error) ||
	    intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
		return false;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	pending = to_intel_crtc(crtc)->unpin_work != NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	return pending;
}

/* Fastboot flip path: reprogram pipesrc (and clear the pch panel fitter
 * when unused) to match the current adjusted mode. */
static void intel_update_pipe_size(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *adjusted_mode;

	if (!i915.fastboot)
		return;

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 *
	 * To fix this properly, we need to hoist the checks up into
	 * compute_mode_changes (or above), check the actual pfit state and
	 * whether the platform allows pfit disable with pipe active, and only
	 * then update the pipesrc and pfit state, even on the flip path.
	 */

	adjusted_mode = &crtc->config->base.adjusted_mode;

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((adjusted_mode->crtc_hdisplay - 1) << 16) |
		   (adjusted_mode->crtc_vdisplay - 1));
	if (!crtc->config->pch_pfit.enabled &&
	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
		I915_WRITE(PF_CTL(crtc->pipe), 0);
		I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
		I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
	}
	crtc->config->pipe_src_w = adjusted_mode->crtc_hdisplay;
	crtc->config->pipe_src_h = adjusted_mode->crtc_vdisplay;
}

/* Switch the FDI link from a training pattern to normal pixel traffic
 * once link training has completed. */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}

/* The FDI link training functions for ILK/Ibexpeak.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll (up to 5 tries) for bit lock to confirm pattern 1 trained */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock to confirm pattern 2 trained */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}

/* Voltage-swing/pre-emphasis settings stepped through during SNB-B (and IVB
 * manual) FDI training. */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};

/* The FDI link training functions for SNB/Cougarpoint. */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, retry;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

/* gen6_fdi_link_train, continued: program PCH-side RX, then step through the
 * vswing/pre-emphasis table until bit lock / symbol lock is achieved. */
	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Train 1: try each vswing/pre-emphasis level until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* Train 2: same stepping, waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}

/* Enable the PCH FDI RX PLL and the (always-on) CPU FDI TX PLL for the
 * pipe, switching the RX from Rawclk to PCDclk. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;


	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}

/* Reverse of ironlake_fdi_pll_enable: back to Rawclk, then turn off the
 * TX and RX PLLs. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off.
 */
	POSTING_READ(reg);
	udelay(100);
}

/* Disable CPU FDI TX and PCH FDI RX and put both back into training
 * pattern 1, keeping the FDI RX BPC field in sync with PIPECONF. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}

/* Returns true (after waiting for a vblank where a flip is armed) if any
 * CRTC still has framebuffer unpin work outstanding. */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->unpin_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}

/* Complete a page flip: send the vblank event (if any), drop the vblank
 * reference, wake flip waiters and queue the deferred unpin work.
 * Caller holds dev->event_lock. */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_send_vblank_event(intel_crtc->base.dev,
				      intel_crtc->pipe,
				      work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}

/* Block (up to 60s) until the CRTC's pending flip completes, force-completing
 * a stuck flip, then wait for GPU rendering on the current front buffer. */
void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
	if (WARN_ON(wait_event_timeout(dev_priv->pending_flip_queue,
				       !intel_crtc_has_pending_flip(crtc),
				       60*HZ) == 0)) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		lockmgr(&dev->event_lock, LK_EXCLUSIVE);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		lockmgr(&dev->event_lock, LK_RELEASE);
	}

	if (crtc->primary->fb) {
		mutex_lock(&dev->struct_mutex);
		intel_finish_fb(crtc->primary->fb);
		mutex_unlock(&dev->struct_mutex);
	}
}

/*
 * Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	mutex_lock(&dev_priv->dpio_lock);

	/* It is necessary to ungate the pixclk gate prior to programming
	 * the divisors, and gate it back when it is done.
	 */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	/* Disable SSCCTL */
	intel_sbi_write(dev_priv, SBI_SSCCTL6,
			intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
			SBI_SSCCTL_DISABLE,
			SBI_ICLK);

	/* 20MHz is a corner case which is out of range for the 7-bit divisor */
	if (clock == 20000) {
		auxdiv = 1;
		divsel = 0x41;
		phaseinc = 0x20;
	} else {
		/* The iCLK virtual clock root frequency is in MHz,
		 * but the adjusted_mode->crtc_clock is in KHz. To get the
		 * divisors, it is necessary to divide one by another, so we
		 * convert the virtual clock precision to KHz here for higher
		 * precision.
		 */
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor, msb_divisor_value, pi_value;

		desired_divisor = (iclk_virtual_root_freq / clock);
		msb_divisor_value = desired_divisor / iclk_pi_range;
		pi_value = desired_divisor % iclk_pi_range;

		auxdiv = 0;
		divsel = msb_divisor_value - 2;
		phaseinc = pi_value;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
		      clock,
		      auxdiv,
		      divsel,
		      phasedir,
		      phaseinc);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);

	mutex_unlock(&dev_priv->dpio_lock);
}

/* Copy the CPU transcoder's H/V timing registers into the matching PCH
 * transcoder registers. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum i915_pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}

/* Set or clear the FDI B/C lane bifurcation bit; both FDI B and C RX must
 * be disabled while it changes. */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}

/* Pick the bifurcation setting based on which pipe needs how many FDI
 * lanes: pipe B with >2 lanes takes the lanes pipe C would use. */
static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;

	switch (intel_crtc->pipe) {
	case PIPE_A:
		break;
	case PIPE_B:
		if (intel_crtc->config->fdi_lanes > 2)
			cpt_set_fdi_bc_bifurcation(dev, false);
		else
			cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	case PIPE_C:
		cpt_set_fdi_bc_bifurcation(dev, true);

		break;
	default:
		BUG();
	}
}

/*
 * Enable PCH resources required for PCH ports:
 *   - PCH PLLs
 *   - FDI training & RX/TX
 *   - update transcoder timings
 *   - DP transcoding bits
 *   - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 reg, temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll == DPLL_ID_PCH_PLL_B)
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= (TRANS_DP_OUTPUT_ENABLE |
			 TRANS_DP_ENH_FRAMING);
		temp |= bpc << 9; /* same format but at 11:9 */

		if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PCH_DP_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}

4061 static void lpt_pch_enable(struct drm_crtc *crtc) 4062 { 4063 struct drm_device *dev = crtc->dev; 4064 struct drm_i915_private *dev_priv = dev->dev_private; 4065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4066 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4067 4068 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A); 4069 4070 lpt_program_iclkip(crtc); 4071 4072 /* Set transcoder timing. */ 4073 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A); 4074 4075 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); 4076 } 4077 4078 void intel_put_shared_dpll(struct intel_crtc *crtc) 4079 { 4080 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 4081 4082 if (pll == NULL) 4083 return; 4084 4085 if (!(pll->config.crtc_mask & (1 << crtc->pipe))) { 4086 WARN(1, "bad %s crtc mask\n", pll->name); 4087 return; 4088 } 4089 4090 pll->config.crtc_mask &= ~(1 << crtc->pipe); 4091 if (pll->config.crtc_mask == 0) { 4092 WARN_ON(pll->on); 4093 WARN_ON(pll->active); 4094 } 4095 4096 crtc->config->shared_dpll = DPLL_ID_PRIVATE; 4097 } 4098 4099 struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, 4100 struct intel_crtc_state *crtc_state) 4101 { 4102 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 4103 struct intel_shared_dpll *pll; 4104 enum intel_dpll_id i; 4105 4106 if (HAS_PCH_IBX(dev_priv->dev)) { 4107 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. 
*/ 4108 i = (enum intel_dpll_id) crtc->pipe; 4109 pll = &dev_priv->shared_dplls[i]; 4110 4111 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 4112 crtc->base.base.id, pll->name); 4113 4114 WARN_ON(pll->new_config->crtc_mask); 4115 4116 goto found; 4117 } 4118 4119 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4120 pll = &dev_priv->shared_dplls[i]; 4121 4122 /* Only want to check enabled timings first */ 4123 if (pll->new_config->crtc_mask == 0) 4124 continue; 4125 4126 if (memcmp(&crtc_state->dpll_hw_state, 4127 &pll->new_config->hw_state, 4128 sizeof(pll->new_config->hw_state)) == 0) { 4129 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, ative %d)\n", 4130 crtc->base.base.id, pll->name, 4131 pll->new_config->crtc_mask, 4132 pll->active); 4133 goto found; 4134 } 4135 } 4136 4137 /* Ok no matching timings, maybe there's a free one? */ 4138 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4139 pll = &dev_priv->shared_dplls[i]; 4140 if (pll->new_config->crtc_mask == 0) { 4141 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 4142 crtc->base.base.id, pll->name); 4143 goto found; 4144 } 4145 } 4146 4147 return NULL; 4148 4149 found: 4150 if (pll->new_config->crtc_mask == 0) 4151 pll->new_config->hw_state = crtc_state->dpll_hw_state; 4152 4153 crtc_state->shared_dpll = i; 4154 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 4155 pipe_name(crtc->pipe)); 4156 4157 pll->new_config->crtc_mask |= 1 << crtc->pipe; 4158 4159 return pll; 4160 } 4161 4162 /** 4163 * intel_shared_dpll_start_config - start a new PLL staged config 4164 * @dev_priv: DRM device 4165 * @clear_pipes: mask of pipes that will have their PLLs freed 4166 * 4167 * Starts a new PLL staged config, copying the current config but 4168 * releasing the references of pipes specified in clear_pipes. 
4169 */ 4170 static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv, 4171 unsigned clear_pipes) 4172 { 4173 struct intel_shared_dpll *pll; 4174 enum intel_dpll_id i; 4175 4176 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4177 pll = &dev_priv->shared_dplls[i]; 4178 4179 pll->new_config = kmemdup(&pll->config, sizeof pll->config, 4180 GFP_KERNEL); 4181 if (!pll->new_config) 4182 goto cleanup; 4183 4184 pll->new_config->crtc_mask &= ~clear_pipes; 4185 } 4186 4187 return 0; 4188 4189 cleanup: 4190 while (--i >= 0) { 4191 pll = &dev_priv->shared_dplls[i]; 4192 kfree(pll->new_config); 4193 pll->new_config = NULL; 4194 } 4195 4196 return -ENOMEM; 4197 } 4198 4199 static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv) 4200 { 4201 struct intel_shared_dpll *pll; 4202 enum intel_dpll_id i; 4203 4204 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4205 pll = &dev_priv->shared_dplls[i]; 4206 4207 WARN_ON(pll->new_config == &pll->config); 4208 4209 pll->config = *pll->new_config; 4210 kfree(pll->new_config); 4211 pll->new_config = NULL; 4212 } 4213 } 4214 4215 static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv) 4216 { 4217 struct intel_shared_dpll *pll; 4218 enum intel_dpll_id i; 4219 4220 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 4221 pll = &dev_priv->shared_dplls[i]; 4222 4223 WARN_ON(pll->new_config == &pll->config); 4224 4225 kfree(pll->new_config); 4226 pll->new_config = NULL; 4227 } 4228 } 4229 4230 static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4231 { 4232 struct drm_i915_private *dev_priv = dev->dev_private; 4233 int dslreg = PIPEDSL(pipe); 4234 u32 temp; 4235 4236 temp = I915_READ(dslreg); 4237 udelay(500); 4238 if (wait_for(I915_READ(dslreg) != temp, 5)) { 4239 if (wait_for(I915_READ(dslreg) != temp, 5)) 4240 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe)); 4241 } 4242 } 4243 4244 static void skylake_pfit_enable(struct intel_crtc *crtc) 4245 { 4246 struct 
drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		I915_WRITE(PS_CTL(pipe), PS_ENABLE);
		I915_WRITE(PS_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PS_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

/* Enable the ILK-style panel fitter with the precomputed pos/size. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}

/* Restore (re-commit) the state of every sprite plane on @crtc's pipe. */
static void intel_enable_sprite_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe == pipe)
			intel_plane_restore(&intel_plane->base);
	}
}

/*
 * Disable a plane internally without actually modifying the plane's state.
 * This will allow us to easily restore the plane later by just reprogramming
 * its state.
 */
static void disable_plane_internal(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	/* commit a duplicated state with visible=false; the plane's own
	 * state object is left untouched */
	struct drm_plane_state *state =
		plane->funcs->atomic_duplicate_state(plane);
	struct intel_plane_state *intel_state = to_intel_plane_state(state);

	intel_state->visible = false;
	intel_plane->commit_plane(plane, intel_state);

	intel_plane_destroy_state(plane, state);
}

/* Turn off every sprite plane on @crtc's pipe that has a framebuffer,
 * without clobbering its saved state. */
static void intel_disable_sprite_planes(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(crtc)->pipe;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;

	drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
		intel_plane = to_intel_plane(plane);
		if (plane->fb && intel_plane->pipe == pipe)
			disable_plane_internal(plane);
	}
}

/* Enable IPS (Intermediate Pixel Storage), via the pcode mailbox on BDW
 * or directly through IPS_CTL on HSW. No-op if not enabled in config. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/* We can only enable IPS after we enable a plane and wait for a vblank */
	intel_wait_for_vblank(dev, crtc->pipe);

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "it's not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}

/* Disable IPS (mailbox on BDW, IPS_CTL on HSW) and wait for the required
 * vblank before the caller may disable the plane. */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}

/** Loads the palette/gamma unit for the CRTC with the prepared values */
static void intel_crtc_load_lut(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	int palreg = PALETTE(pipe);
	int i;
	bool reenable_ips = false;

	/* The clocks have to be on to load the palette.
*/ 4394 if (!crtc->state->enable || !intel_crtc->active) 4395 return; 4396 4397 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4398 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) 4399 assert_dsi_pll_enabled(dev_priv); 4400 else 4401 assert_pll_enabled(dev_priv, pipe); 4402 } 4403 4404 /* use legacy palette for Ironlake */ 4405 if (!HAS_GMCH_DISPLAY(dev)) 4406 palreg = LGC_PALETTE(pipe); 4407 4408 /* Workaround : Do not read or write the pipe palette/gamma data while 4409 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4410 */ 4411 if (IS_HASWELL(dev) && intel_crtc->config->ips_enabled && 4412 ((I915_READ(GAMMA_MODE(pipe)) & GAMMA_MODE_MODE_MASK) == 4413 GAMMA_MODE_MODE_SPLIT)) { 4414 hsw_disable_ips(intel_crtc); 4415 reenable_ips = true; 4416 } 4417 4418 for (i = 0; i < 256; i++) { 4419 I915_WRITE(palreg + 4 * i, 4420 (intel_crtc->lut_r[i] << 16) | 4421 (intel_crtc->lut_g[i] << 8) | 4422 intel_crtc->lut_b[i]); 4423 } 4424 4425 if (reenable_ips) 4426 hsw_enable_ips(intel_crtc); 4427 } 4428 4429 static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) 4430 { 4431 if (!enable && intel_crtc->overlay) { 4432 struct drm_device *dev = intel_crtc->base.dev; 4433 struct drm_i915_private *dev_priv = dev->dev_private; 4434 4435 mutex_lock(&dev->struct_mutex); 4436 dev_priv->mm.interruptible = false; 4437 (void) intel_overlay_switch_off(intel_crtc->overlay); 4438 dev_priv->mm.interruptible = true; 4439 mutex_unlock(&dev->struct_mutex); 4440 } 4441 4442 /* Let userspace switch the overlay on again. In most cases userspace 4443 * has to recompute where to put it anyway. 
4444 */ 4445 } 4446 4447 static void intel_crtc_enable_planes(struct drm_crtc *crtc) 4448 { 4449 struct drm_device *dev = crtc->dev; 4450 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4451 int pipe = intel_crtc->pipe; 4452 4453 intel_enable_primary_hw_plane(crtc->primary, crtc); 4454 intel_enable_sprite_planes(crtc); 4455 intel_crtc_update_cursor(crtc, true); 4456 intel_crtc_dpms_overlay(intel_crtc, true); 4457 4458 hsw_enable_ips(intel_crtc); 4459 4460 mutex_lock(&dev->struct_mutex); 4461 intel_fbc_update(dev); 4462 mutex_unlock(&dev->struct_mutex); 4463 4464 /* 4465 * FIXME: Once we grow proper nuclear flip support out of this we need 4466 * to compute the mask of flip planes precisely. For the time being 4467 * consider this a flip from a NULL plane. 4468 */ 4469 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4470 } 4471 4472 static void intel_crtc_disable_planes(struct drm_crtc *crtc) 4473 { 4474 struct drm_device *dev = crtc->dev; 4475 struct drm_i915_private *dev_priv = dev->dev_private; 4476 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4477 int pipe = intel_crtc->pipe; 4478 4479 intel_crtc_wait_for_pending_flips(crtc); 4480 4481 if (dev_priv->fbc.crtc == intel_crtc) 4482 intel_fbc_disable(dev); 4483 4484 hsw_disable_ips(intel_crtc); 4485 4486 intel_crtc_dpms_overlay(intel_crtc, false); 4487 intel_crtc_update_cursor(crtc, false); 4488 intel_disable_sprite_planes(crtc); 4489 intel_disable_primary_hw_plane(crtc->primary, crtc); 4490 4491 /* 4492 * FIXME: Once we grow proper nuclear flip support out of this we need 4493 * to compute the mask of flip planes precisely. For the time being 4494 * consider this a flip to a NULL plane. 
4495 */ 4496 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4497 } 4498 4499 static void ironlake_crtc_enable(struct drm_crtc *crtc) 4500 { 4501 struct drm_device *dev = crtc->dev; 4502 struct drm_i915_private *dev_priv = dev->dev_private; 4503 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4504 struct intel_encoder *encoder; 4505 int pipe = intel_crtc->pipe; 4506 4507 WARN_ON(!crtc->state->enable); 4508 4509 if (intel_crtc->active) 4510 return; 4511 4512 if (intel_crtc->config->has_pch_encoder) 4513 intel_prepare_shared_dpll(intel_crtc); 4514 4515 if (intel_crtc->config->has_dp_encoder) 4516 intel_dp_set_m_n(intel_crtc, M1_N1); 4517 4518 intel_set_pipe_timings(intel_crtc); 4519 4520 if (intel_crtc->config->has_pch_encoder) { 4521 intel_cpu_transcoder_set_m_n(intel_crtc, 4522 &intel_crtc->config->fdi_m_n, NULL); 4523 } 4524 4525 ironlake_set_pipeconf(crtc); 4526 4527 intel_crtc->active = true; 4528 4529 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4530 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true); 4531 4532 for_each_encoder_on_crtc(dev, crtc, encoder) 4533 if (encoder->pre_enable) 4534 encoder->pre_enable(encoder); 4535 4536 if (intel_crtc->config->has_pch_encoder) { 4537 /* Note: FDI PLL enabling _must_ be done before we enable the 4538 * cpu pipes, hence this is separate from all the other fdi/pch 4539 * enabling. 
*/ 4540 ironlake_fdi_pll_enable(intel_crtc); 4541 } else { 4542 assert_fdi_tx_disabled(dev_priv, pipe); 4543 assert_fdi_rx_disabled(dev_priv, pipe); 4544 } 4545 4546 ironlake_pfit_enable(intel_crtc); 4547 4548 /* 4549 * On ILK+ LUT must be loaded before the pipe is running but with 4550 * clocks enabled 4551 */ 4552 intel_crtc_load_lut(crtc); 4553 4554 intel_update_watermarks(crtc); 4555 intel_enable_pipe(intel_crtc); 4556 4557 if (intel_crtc->config->has_pch_encoder) 4558 ironlake_pch_enable(crtc); 4559 4560 assert_vblank_disabled(crtc); 4561 drm_crtc_vblank_on(crtc); 4562 4563 for_each_encoder_on_crtc(dev, crtc, encoder) 4564 encoder->enable(encoder); 4565 4566 if (HAS_PCH_CPT(dev)) 4567 cpt_verify_modeset(dev, intel_crtc->pipe); 4568 4569 intel_crtc_enable_planes(crtc); 4570 } 4571 4572 /* IPS only exists on ULT machines and is tied to pipe A. */ 4573 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc) 4574 { 4575 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A; 4576 } 4577 4578 /* 4579 * This implements the workaround described in the "notes" section of the mode 4580 * set sequence documentation. When going from no pipes or single pipe to 4581 * multiple pipes, and planes are enabled after the pipe, we need to wait at 4582 * least 2 vblanks on the first pipe before enabling planes on the second pipe. 4583 */ 4584 static void haswell_mode_set_planes_workaround(struct intel_crtc *crtc) 4585 { 4586 struct drm_device *dev = crtc->base.dev; 4587 struct intel_crtc *crtc_it, *other_active_crtc = NULL; 4588 4589 /* We want to get the other_active_crtc only if there's only 1 other 4590 * active crtc. 
*/ 4591 for_each_intel_crtc(dev, crtc_it) { 4592 if (!crtc_it->active || crtc_it == crtc) 4593 continue; 4594 4595 if (other_active_crtc) 4596 return; 4597 4598 other_active_crtc = crtc_it; 4599 } 4600 if (!other_active_crtc) 4601 return; 4602 4603 intel_wait_for_vblank(dev, other_active_crtc->pipe); 4604 intel_wait_for_vblank(dev, other_active_crtc->pipe); 4605 } 4606 4607 static void haswell_crtc_enable(struct drm_crtc *crtc) 4608 { 4609 struct drm_device *dev = crtc->dev; 4610 struct drm_i915_private *dev_priv = dev->dev_private; 4611 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4612 struct intel_encoder *encoder; 4613 int pipe = intel_crtc->pipe; 4614 4615 WARN_ON(!crtc->state->enable); 4616 4617 if (intel_crtc->active) 4618 return; 4619 4620 if (intel_crtc_to_shared_dpll(intel_crtc)) 4621 intel_enable_shared_dpll(intel_crtc); 4622 4623 if (intel_crtc->config->has_dp_encoder) 4624 intel_dp_set_m_n(intel_crtc, M1_N1); 4625 4626 intel_set_pipe_timings(intel_crtc); 4627 4628 if (intel_crtc->config->cpu_transcoder != TRANSCODER_EDP) { 4629 I915_WRITE(PIPE_MULT(intel_crtc->config->cpu_transcoder), 4630 intel_crtc->config->pixel_multiplier - 1); 4631 } 4632 4633 if (intel_crtc->config->has_pch_encoder) { 4634 intel_cpu_transcoder_set_m_n(intel_crtc, 4635 &intel_crtc->config->fdi_m_n, NULL); 4636 } 4637 4638 haswell_set_pipeconf(crtc); 4639 4640 intel_set_pipe_csc(crtc); 4641 4642 intel_crtc->active = true; 4643 4644 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4645 for_each_encoder_on_crtc(dev, crtc, encoder) 4646 if (encoder->pre_enable) 4647 encoder->pre_enable(encoder); 4648 4649 if (intel_crtc->config->has_pch_encoder) { 4650 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4651 true); 4652 dev_priv->display.fdi_link_train(crtc); 4653 } 4654 4655 intel_ddi_enable_pipe_clock(intel_crtc); 4656 4657 if (IS_SKYLAKE(dev)) 4658 skylake_pfit_enable(intel_crtc); 4659 else 4660 ironlake_pfit_enable(intel_crtc); 4661 4662 /* 4663 * On 
ILK+ LUT must be loaded before the pipe is running but with 4664 * clocks enabled 4665 */ 4666 intel_crtc_load_lut(crtc); 4667 4668 intel_ddi_set_pipe_settings(crtc); 4669 intel_ddi_enable_transcoder_func(crtc); 4670 4671 intel_update_watermarks(crtc); 4672 intel_enable_pipe(intel_crtc); 4673 4674 if (intel_crtc->config->has_pch_encoder) 4675 lpt_pch_enable(crtc); 4676 4677 if (intel_crtc->config->dp_encoder_is_mst) 4678 intel_ddi_set_vc_payload_alloc(crtc, true); 4679 4680 assert_vblank_disabled(crtc); 4681 drm_crtc_vblank_on(crtc); 4682 4683 for_each_encoder_on_crtc(dev, crtc, encoder) { 4684 encoder->enable(encoder); 4685 intel_opregion_notify_encoder(encoder, true); 4686 } 4687 4688 /* If we change the relative order between pipe/planes enabling, we need 4689 * to change the workaround. */ 4690 haswell_mode_set_planes_workaround(intel_crtc); 4691 intel_crtc_enable_planes(crtc); 4692 } 4693 4694 static void skylake_pfit_disable(struct intel_crtc *crtc) 4695 { 4696 struct drm_device *dev = crtc->base.dev; 4697 struct drm_i915_private *dev_priv = dev->dev_private; 4698 int pipe = crtc->pipe; 4699 4700 /* To avoid upsetting the power well on haswell only disable the pfit if 4701 * it's in use. The hw state code will make sure we get this right. */ 4702 if (crtc->config->pch_pfit.enabled) { 4703 I915_WRITE(PS_CTL(pipe), 0); 4704 I915_WRITE(PS_WIN_POS(pipe), 0); 4705 I915_WRITE(PS_WIN_SZ(pipe), 0); 4706 } 4707 } 4708 4709 static void ironlake_pfit_disable(struct intel_crtc *crtc) 4710 { 4711 struct drm_device *dev = crtc->base.dev; 4712 struct drm_i915_private *dev_priv = dev->dev_private; 4713 int pipe = crtc->pipe; 4714 4715 /* To avoid upsetting the power well on haswell only disable the pfit if 4716 * it's in use. The hw state code will make sure we get this right. 
*/ 4717 if (crtc->config->pch_pfit.enabled) { 4718 I915_WRITE(PF_CTL(pipe), 0); 4719 I915_WRITE(PF_WIN_POS(pipe), 0); 4720 I915_WRITE(PF_WIN_SZ(pipe), 0); 4721 } 4722 } 4723 4724 static void ironlake_crtc_disable(struct drm_crtc *crtc) 4725 { 4726 struct drm_device *dev = crtc->dev; 4727 struct drm_i915_private *dev_priv = dev->dev_private; 4728 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4729 struct intel_encoder *encoder; 4730 int pipe = intel_crtc->pipe; 4731 u32 reg, temp; 4732 4733 if (!intel_crtc->active) 4734 return; 4735 4736 intel_crtc_disable_planes(crtc); 4737 4738 for_each_encoder_on_crtc(dev, crtc, encoder) 4739 encoder->disable(encoder); 4740 4741 drm_crtc_vblank_off(crtc); 4742 assert_vblank_disabled(crtc); 4743 4744 if (intel_crtc->config->has_pch_encoder) 4745 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); 4746 4747 intel_disable_pipe(intel_crtc); 4748 4749 ironlake_pfit_disable(intel_crtc); 4750 4751 for_each_encoder_on_crtc(dev, crtc, encoder) 4752 if (encoder->post_disable) 4753 encoder->post_disable(encoder); 4754 4755 if (intel_crtc->config->has_pch_encoder) { 4756 ironlake_fdi_disable(crtc); 4757 4758 ironlake_disable_pch_transcoder(dev_priv, pipe); 4759 4760 if (HAS_PCH_CPT(dev)) { 4761 /* disable TRANS_DP_CTL */ 4762 reg = TRANS_DP_CTL(pipe); 4763 temp = I915_READ(reg); 4764 temp &= ~(TRANS_DP_OUTPUT_ENABLE | 4765 TRANS_DP_PORT_SEL_MASK); 4766 temp |= TRANS_DP_PORT_SEL_NONE; 4767 I915_WRITE(reg, temp); 4768 4769 /* disable DPLL_SEL */ 4770 temp = I915_READ(PCH_DPLL_SEL); 4771 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe)); 4772 I915_WRITE(PCH_DPLL_SEL, temp); 4773 } 4774 4775 /* disable PCH DPLL */ 4776 intel_disable_shared_dpll(intel_crtc); 4777 4778 ironlake_fdi_pll_disable(intel_crtc); 4779 } 4780 4781 intel_crtc->active = false; 4782 intel_update_watermarks(crtc); 4783 4784 mutex_lock(&dev->struct_mutex); 4785 intel_fbc_update(dev); 4786 mutex_unlock(&dev->struct_mutex); 4787 } 4788 4789 static void 
haswell_crtc_disable(struct drm_crtc *crtc) 4790 { 4791 struct drm_device *dev = crtc->dev; 4792 struct drm_i915_private *dev_priv = dev->dev_private; 4793 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4794 struct intel_encoder *encoder; 4795 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4796 4797 if (!intel_crtc->active) 4798 return; 4799 4800 intel_crtc_disable_planes(crtc); 4801 4802 for_each_encoder_on_crtc(dev, crtc, encoder) { 4803 intel_opregion_notify_encoder(encoder, false); 4804 encoder->disable(encoder); 4805 } 4806 4807 drm_crtc_vblank_off(crtc); 4808 assert_vblank_disabled(crtc); 4809 4810 if (intel_crtc->config->has_pch_encoder) 4811 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4812 false); 4813 intel_disable_pipe(intel_crtc); 4814 4815 if (intel_crtc->config->dp_encoder_is_mst) 4816 intel_ddi_set_vc_payload_alloc(crtc, false); 4817 4818 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 4819 4820 if (IS_SKYLAKE(dev)) 4821 skylake_pfit_disable(intel_crtc); 4822 else 4823 ironlake_pfit_disable(intel_crtc); 4824 4825 intel_ddi_disable_pipe_clock(intel_crtc); 4826 4827 if (intel_crtc->config->has_pch_encoder) { 4828 lpt_disable_pch_transcoder(dev_priv); 4829 intel_ddi_fdi_disable(crtc); 4830 } 4831 4832 for_each_encoder_on_crtc(dev, crtc, encoder) 4833 if (encoder->post_disable) 4834 encoder->post_disable(encoder); 4835 4836 intel_crtc->active = false; 4837 intel_update_watermarks(crtc); 4838 4839 mutex_lock(&dev->struct_mutex); 4840 intel_fbc_update(dev); 4841 mutex_unlock(&dev->struct_mutex); 4842 4843 if (intel_crtc_to_shared_dpll(intel_crtc)) 4844 intel_disable_shared_dpll(intel_crtc); 4845 } 4846 4847 static void ironlake_crtc_off(struct drm_crtc *crtc) 4848 { 4849 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4850 intel_put_shared_dpll(intel_crtc); 4851 } 4852 4853 4854 static void i9xx_pfit_enable(struct intel_crtc *crtc) 4855 { 4856 struct drm_device *dev = crtc->base.dev; 4857 
struct drm_i915_private *dev_priv = dev->dev_private; 4858 struct intel_crtc_state *pipe_config = crtc->config; 4859 4860 if (!pipe_config->gmch_pfit.control) 4861 return; 4862 4863 /* 4864 * The panel fitter should only be adjusted whilst the pipe is disabled, 4865 * according to register description and PRM. 4866 */ 4867 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE); 4868 assert_pipe_disabled(dev_priv, crtc->pipe); 4869 4870 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios); 4871 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control); 4872 4873 /* Border color in case we don't scale up to the full screen. Black by 4874 * default, change to something else for debugging. */ 4875 I915_WRITE(BCLRPAT(crtc->pipe), 0); 4876 } 4877 4878 static enum intel_display_power_domain port_to_power_domain(enum port port) 4879 { 4880 switch (port) { 4881 case PORT_A: 4882 return POWER_DOMAIN_PORT_DDI_A_4_LANES; 4883 case PORT_B: 4884 return POWER_DOMAIN_PORT_DDI_B_4_LANES; 4885 case PORT_C: 4886 return POWER_DOMAIN_PORT_DDI_C_4_LANES; 4887 case PORT_D: 4888 return POWER_DOMAIN_PORT_DDI_D_4_LANES; 4889 default: 4890 WARN_ON_ONCE(1); 4891 return POWER_DOMAIN_PORT_OTHER; 4892 } 4893 } 4894 4895 #define for_each_power_domain(domain, mask) \ 4896 for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ 4897 if ((1 << (domain)) & (mask)) 4898 4899 enum intel_display_power_domain 4900 intel_display_port_power_domain(struct intel_encoder *intel_encoder) 4901 { 4902 struct drm_device *dev = intel_encoder->base.dev; 4903 struct intel_digital_port *intel_dig_port; 4904 4905 switch (intel_encoder->type) { 4906 case INTEL_OUTPUT_UNKNOWN: 4907 /* Only DDI platforms should ever use this output type */ 4908 WARN_ON_ONCE(!HAS_DDI(dev)); 4909 case INTEL_OUTPUT_DISPLAYPORT: 4910 case INTEL_OUTPUT_HDMI: 4911 case INTEL_OUTPUT_EDP: 4912 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 4913 return port_to_power_domain(intel_dig_port->port); 4914 case INTEL_OUTPUT_ANALOG: 4915 
return POWER_DOMAIN_PORT_CRT; 4916 case INTEL_OUTPUT_DSI: 4917 return POWER_DOMAIN_PORT_DSI; 4918 default: 4919 return POWER_DOMAIN_PORT_OTHER; 4920 } 4921 } 4922 4923 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) 4924 { 4925 struct drm_device *dev = crtc->dev; 4926 struct intel_encoder *intel_encoder; 4927 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4928 enum i915_pipe pipe = intel_crtc->pipe; 4929 unsigned long mask; 4930 enum transcoder transcoder; 4931 4932 transcoder = intel_pipe_to_cpu_transcoder(dev->dev_private, pipe); 4933 4934 mask = BIT(POWER_DOMAIN_PIPE(pipe)); 4935 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder)); 4936 if (intel_crtc->config->pch_pfit.enabled || 4937 intel_crtc->config->pch_pfit.force_thru) 4938 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe)); 4939 4940 for_each_encoder_on_crtc(dev, crtc, intel_encoder) 4941 mask |= BIT(intel_display_port_power_domain(intel_encoder)); 4942 4943 return mask; 4944 } 4945 4946 static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) 4947 { 4948 struct drm_device *dev = state->dev; 4949 struct drm_i915_private *dev_priv = dev->dev_private; 4950 unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; 4951 struct intel_crtc *crtc; 4952 4953 /* 4954 * First get all needed power domains, then put all unneeded, to avoid 4955 * any unnecessary toggling of the power wells. 
4956 */ 4957 for_each_intel_crtc(dev, crtc) { 4958 enum intel_display_power_domain domain; 4959 4960 if (!crtc->base.state->enable) 4961 continue; 4962 4963 pipe_domains[crtc->pipe] = get_crtc_power_domains(&crtc->base); 4964 4965 for_each_power_domain(domain, pipe_domains[crtc->pipe]) 4966 intel_display_power_get(dev_priv, domain); 4967 } 4968 4969 if (dev_priv->display.modeset_global_resources) 4970 dev_priv->display.modeset_global_resources(state); 4971 4972 for_each_intel_crtc(dev, crtc) { 4973 enum intel_display_power_domain domain; 4974 4975 for_each_power_domain(domain, crtc->enabled_power_domains) 4976 intel_display_power_put(dev_priv, domain); 4977 4978 crtc->enabled_power_domains = pipe_domains[crtc->pipe]; 4979 } 4980 4981 intel_display_set_init_power(dev_priv, false); 4982 } 4983 4984 /* returns HPLL frequency in kHz */ 4985 static int valleyview_get_vco(struct drm_i915_private *dev_priv) 4986 { 4987 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 4988 4989 /* Obtain SKU information */ 4990 mutex_lock(&dev_priv->dpio_lock); 4991 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 4992 CCK_FUSE_HPLL_FREQ_MASK; 4993 mutex_unlock(&dev_priv->dpio_lock); 4994 4995 return vco_freq[hpll_freq] * 1000; 4996 } 4997 4998 static void vlv_update_cdclk(struct drm_device *dev) 4999 { 5000 struct drm_i915_private *dev_priv = dev->dev_private; 5001 5002 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 5003 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", 5004 dev_priv->vlv_cdclk_freq); 5005 5006 /* 5007 * Program the gmbus_freq based on the cdclk frequency. 5008 * BSpec erroneously claims we should aim for 4MHz, but 5009 * in fact 1MHz is the correct frequency. 
5010 */ 5011 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000)); 5012 } 5013 5014 /* Adjust CDclk dividers to allow high res or save power if possible */ 5015 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5016 { 5017 struct drm_i915_private *dev_priv = dev->dev_private; 5018 u32 val, cmd; 5019 5020 WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); 5021 5022 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ 5023 cmd = 2; 5024 else if (cdclk == 266667) 5025 cmd = 1; 5026 else 5027 cmd = 0; 5028 5029 mutex_lock(&dev_priv->rps.hw_lock); 5030 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5031 val &= ~DSPFREQGUAR_MASK; 5032 val |= (cmd << DSPFREQGUAR_SHIFT); 5033 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5034 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5035 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), 5036 50)) { 5037 DRM_ERROR("timed out waiting for CDclk change\n"); 5038 } 5039 mutex_unlock(&dev_priv->rps.hw_lock); 5040 5041 if (cdclk == 400000) { 5042 u32 divider; 5043 5044 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5045 5046 mutex_lock(&dev_priv->dpio_lock); 5047 /* adjust cdclk divider */ 5048 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5049 val &= ~DISPLAY_FREQUENCY_VALUES; 5050 val |= divider; 5051 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 5052 5053 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & 5054 DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 5055 50)) 5056 DRM_ERROR("timed out waiting for CDclk change\n"); 5057 mutex_unlock(&dev_priv->dpio_lock); 5058 } 5059 5060 mutex_lock(&dev_priv->dpio_lock); 5061 /* adjust self-refresh exit latency value */ 5062 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC); 5063 val &= ~0x7f; 5064 5065 /* 5066 * For high bandwidth configs, we set a higher latency in the bunit 5067 * so that the core display fetch 
happens in time to avoid underruns. 5068 */ 5069 if (cdclk == 400000) 5070 val |= 4500 / 250; /* 4.5 usec */ 5071 else 5072 val |= 3000 / 250; /* 3.0 usec */ 5073 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val); 5074 mutex_unlock(&dev_priv->dpio_lock); 5075 5076 vlv_update_cdclk(dev); 5077 } 5078 5079 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) 5080 { 5081 struct drm_i915_private *dev_priv = dev->dev_private; 5082 u32 val, cmd; 5083 5084 WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); 5085 5086 switch (cdclk) { 5087 case 333333: 5088 case 320000: 5089 case 266667: 5090 case 200000: 5091 break; 5092 default: 5093 MISSING_CASE(cdclk); 5094 return; 5095 } 5096 5097 /* 5098 * Specs are full of misinformation, but testing on actual 5099 * hardware has shown that we just need to write the desired 5100 * CCK divider into the Punit register. 5101 */ 5102 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; 5103 5104 mutex_lock(&dev_priv->rps.hw_lock); 5105 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); 5106 val &= ~DSPFREQGUAR_MASK_CHV; 5107 val |= (cmd << DSPFREQGUAR_SHIFT_CHV); 5108 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); 5109 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & 5110 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), 5111 50)) { 5112 DRM_ERROR("timed out waiting for CDclk change\n"); 5113 } 5114 mutex_unlock(&dev_priv->rps.hw_lock); 5115 5116 vlv_update_cdclk(dev); 5117 } 5118 5119 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 5120 int max_pixclk) 5121 { 5122 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; 5123 int limit = IS_CHERRYVIEW(dev_priv) ? 
95 : 90; 5124 5125 /* 5126 * Really only a few cases to deal with, as only 4 CDclks are supported: 5127 * 200MHz 5128 * 267MHz 5129 * 320/333MHz (depends on HPLL freq) 5130 * 400MHz (VLV only) 5131 * So we check to see whether we're above 90% (VLV) or 95% (CHV) 5132 * of the lower bin and adjust if needed. 5133 * 5134 * We seem to get an unstable or solid color picture at 200MHz. 5135 * Not sure what's wrong. For now use 200MHz only when all pipes 5136 * are off. 5137 */ 5138 if (!IS_CHERRYVIEW(dev_priv) && 5139 max_pixclk > freq_320*limit/100) 5140 return 400000; 5141 else if (max_pixclk > 266667*limit/100) 5142 return freq_320; 5143 else if (max_pixclk > 0) 5144 return 266667; 5145 else 5146 return 200000; 5147 } 5148 5149 /* compute the max pixel clock for new configuration */ 5150 static int intel_mode_max_pixclk(struct drm_i915_private *dev_priv) 5151 { 5152 struct drm_device *dev = dev_priv->dev; 5153 struct intel_crtc *intel_crtc; 5154 int max_pixclk = 0; 5155 5156 for_each_intel_crtc(dev, intel_crtc) { 5157 if (intel_crtc->new_enabled) 5158 max_pixclk = max(max_pixclk, 5159 intel_crtc->new_config->base.adjusted_mode.crtc_clock); 5160 } 5161 5162 return max_pixclk; 5163 } 5164 5165 static void valleyview_modeset_global_pipes(struct drm_device *dev, 5166 unsigned *prepare_pipes) 5167 { 5168 struct drm_i915_private *dev_priv = dev->dev_private; 5169 struct intel_crtc *intel_crtc; 5170 int max_pixclk = intel_mode_max_pixclk(dev_priv); 5171 5172 if (valleyview_calc_cdclk(dev_priv, max_pixclk) == 5173 dev_priv->vlv_cdclk_freq) 5174 return; 5175 5176 /* disable/enable all currently active pipes while we change cdclk */ 5177 for_each_intel_crtc(dev, intel_crtc) 5178 if (intel_crtc->base.state->enable) 5179 *prepare_pipes |= (1 << intel_crtc->pipe); 5180 } 5181 5182 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) 5183 { 5184 unsigned int credits, default_credits; 5185 5186 if (IS_CHERRYVIEW(dev_priv)) 5187 default_credits = PFI_CREDIT(12); 
5188 else 5189 default_credits = PFI_CREDIT(8); 5190 5191 if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { 5192 /* CHV suggested value is 31 or 63 */ 5193 if (IS_CHERRYVIEW(dev_priv)) 5194 credits = PFI_CREDIT_31; 5195 else 5196 credits = PFI_CREDIT(15); 5197 } else { 5198 credits = default_credits; 5199 } 5200 5201 /* 5202 * WA - write default credits before re-programming 5203 * FIXME: should we also set the resend bit here? 5204 */ 5205 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 5206 default_credits); 5207 5208 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | 5209 credits | PFI_CREDIT_RESEND); 5210 5211 /* 5212 * FIXME is this guaranteed to clear 5213 * immediately or should we poll for it? 5214 */ 5215 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); 5216 } 5217 5218 static void valleyview_modeset_global_resources(struct drm_atomic_state *state) 5219 { 5220 struct drm_device *dev = state->dev; 5221 struct drm_i915_private *dev_priv = dev->dev_private; 5222 int max_pixclk = intel_mode_max_pixclk(dev_priv); 5223 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 5224 5225 if (req_cdclk != dev_priv->vlv_cdclk_freq) { 5226 /* 5227 * FIXME: We can end up here with all power domains off, yet 5228 * with a CDCLK frequency other than the minimum. To account 5229 * for this take the PIPE-A power domain, which covers the HW 5230 * blocks needed for the following programming. This can be 5231 * removed once it's guaranteed that we get here either with 5232 * the minimum CDCLK set, or the required power domains 5233 * enabled. 
5234 */ 5235 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); 5236 5237 if (IS_CHERRYVIEW(dev)) 5238 cherryview_set_cdclk(dev, req_cdclk); 5239 else 5240 valleyview_set_cdclk(dev, req_cdclk); 5241 5242 vlv_program_pfi_credits(dev_priv); 5243 5244 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); 5245 } 5246 } 5247 5248 static void valleyview_crtc_enable(struct drm_crtc *crtc) 5249 { 5250 struct drm_device *dev = crtc->dev; 5251 struct drm_i915_private *dev_priv = to_i915(dev); 5252 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5253 struct intel_encoder *encoder; 5254 int pipe = intel_crtc->pipe; 5255 bool is_dsi; 5256 5257 WARN_ON(!crtc->state->enable); 5258 5259 if (intel_crtc->active) 5260 return; 5261 5262 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 5263 5264 if (!is_dsi) { 5265 if (IS_CHERRYVIEW(dev)) 5266 chv_prepare_pll(intel_crtc, intel_crtc->config); 5267 else 5268 vlv_prepare_pll(intel_crtc, intel_crtc->config); 5269 } 5270 5271 if (intel_crtc->config->has_dp_encoder) 5272 intel_dp_set_m_n(intel_crtc, M1_N1); 5273 5274 intel_set_pipe_timings(intel_crtc); 5275 5276 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { 5277 struct drm_i915_private *dev_priv = dev->dev_private; 5278 5279 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 5280 I915_WRITE(CHV_CANVAS(pipe), 0); 5281 } 5282 5283 i9xx_set_pipeconf(intel_crtc); 5284 5285 intel_crtc->active = true; 5286 5287 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 5288 5289 for_each_encoder_on_crtc(dev, crtc, encoder) 5290 if (encoder->pre_pll_enable) 5291 encoder->pre_pll_enable(encoder); 5292 5293 if (!is_dsi) { 5294 if (IS_CHERRYVIEW(dev)) 5295 chv_enable_pll(intel_crtc, intel_crtc->config); 5296 else 5297 vlv_enable_pll(intel_crtc, intel_crtc->config); 5298 } 5299 5300 for_each_encoder_on_crtc(dev, crtc, encoder) 5301 if (encoder->pre_enable) 5302 encoder->pre_enable(encoder); 5303 5304 i9xx_pfit_enable(intel_crtc); 5305 5306 intel_crtc_load_lut(crtc); 5307 5308 
	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}

/* Load the precomputed DPLL feedback dividers (FP0/FP1) for this pipe. */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}

/*
 * Enable sequence for pre-VLV (gmch) pipes: program PLL dividers,
 * timings and pipe config, then enable PLL, pfit, pipe, encoders and
 * planes in order.
 */
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	WARN_ON(!crtc->state->enable);

	if (intel_crtc->active)
		return;

	i9xx_set_pll_dividers(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_enable_pll(intel_crtc);

	i9xx_pfit_enable(intel_crtc);

	intel_crtc_load_lut(crtc);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	intel_crtc_enable_planes(crtc);

	/*
	 * Gen2 reports pipe
	 * underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't raise interrupts, so check manually. */
	i9xx_check_fifo_underruns(dev_priv);
}

/* Disable the panel fitter; only legal while the pipe is off. */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}

/*
 * Disable sequence for gmch pipes: tear down planes, encoders, pipe,
 * pfit and PLL in reverse enable order, honoring the self-refresh and
 * double-buffering constraints documented inline.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	if (!intel_crtc->active)
		return;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode.
	 * The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	intel_set_memory_cxsr(dev_priv, false);
	intel_crtc_disable_planes(crtc);

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 * We also need to wait on all gmch platforms because of the
	 * self-refresh mode constraint explained above.
	 */
	intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	/* DSI supplies its own clock; only disable a PLL we enabled. */
	if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_crtc->active = false;
	intel_update_watermarks(crtc);

	mutex_lock(&dev->struct_mutex);
	intel_fbc_update(dev);
	mutex_unlock(&dev->struct_mutex);
}

/* Nothing to do on gmch platforms when a crtc is fully turned off. */
static void i9xx_crtc_off(struct drm_crtc *crtc)
{
}

/* Master function to enable/disable CRTC and corresponding power wells */
void intel_crtc_control(struct drm_crtc *crtc, bool enable)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains;

	if (enable) {
		if (!intel_crtc->active) {
			/* Grab every power domain the pipe needs first. */
			domains = get_crtc_power_domains(crtc);
			for_each_power_domain(domain, domains)
				intel_display_power_get(dev_priv, domain);
			intel_crtc->enabled_power_domains = domains;

			dev_priv->display.crtc_enable(crtc);
		}
	} else {
		if (intel_crtc->active) {
			dev_priv->display.crtc_disable(crtc);

			/* Drop the references taken at enable time. */
			domains = intel_crtc->enabled_power_domains;
			for_each_power_domain(domain, domains)
				intel_display_power_put(dev_priv, domain);
			intel_crtc->enabled_power_domains = 0;
		}
	}

	update_state_fb(intel_crtc->base.primary);
}

/**
 * Sets the power management mode of the pipe and plane.
 */
void intel_crtc_update_dpms(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *intel_encoder;
	bool enable = false;

	/* Keep the crtc on while any encoder has an active connector. */
	for_each_encoder_on_crtc(dev, crtc, intel_encoder)
		enable |= intel_encoder->connectors_active;

	intel_crtc_control(crtc, enable);
}

/* Shut down a crtc and mark connectors/encoders formerly using it as off. */
static void intel_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_connector *connector;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* crtc should still be enabled when we disable it. */
	WARN_ON(!crtc->state->enable);

	dev_priv->display.crtc_disable(crtc);
	dev_priv->display.off(crtc);

	crtc->primary->funcs->disable_plane(crtc->primary);

	/* Update computed state.
	 */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		if (connector->encoder->crtc != crtc)
			continue;

		connector->dpms = DRM_MODE_DPMS_OFF;
		to_intel_encoder(connector->encoder)->connectors_active = false;
	}
}

/* Free an intel_encoder after detaching it from the drm core. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}

/* Simple dpms helper for encoders with just one connector, no cloning and only
 * one kind of off state. It clamps all !ON modes to fully OFF and changes the
 * state of the entire output pipe. */
static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
	if (mode == DRM_MODE_DPMS_ON) {
		encoder->connectors_active = true;

		intel_crtc_update_dpms(encoder->base.crtc);
	} else {
		encoder->connectors_active = false;

		intel_crtc_update_dpms(encoder->base.crtc);
	}
}

/* Cross check the actual hw state with our own modeset state tracking (and it's
 * internal consistency).
 */
static void intel_connector_check_state(struct intel_connector *connector)
{
	if (connector->get_hw_state(connector)) {
		struct intel_encoder *encoder = connector->encoder;
		struct drm_crtc *crtc;
		bool encoder_enabled;
		enum i915_pipe pipe;

		DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
			      connector->base.base.id,
			      connector->base.name);

		/* there is no real hw state for MST connectors */
		if (connector->mst_port)
			return;

		I915_STATE_WARN(connector->base.dpms == DRM_MODE_DPMS_OFF,
			     "wrong connector dpms state\n");
		I915_STATE_WARN(connector->base.encoder != &encoder->base,
			     "active connector not linked to encoder\n");

		if (encoder) {
			I915_STATE_WARN(!encoder->connectors_active,
			     "encoder->connectors_active not set\n");

			encoder_enabled = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(!encoder_enabled, "encoder not enabled\n");
			if (I915_STATE_WARN_ON(!encoder->base.crtc))
				return;

			crtc = encoder->base.crtc;

			I915_STATE_WARN(!crtc->state->enable,
					"crtc not enabled\n");
			I915_STATE_WARN(!to_intel_crtc(crtc)->active, "crtc not active\n");
			I915_STATE_WARN(pipe != to_intel_crtc(crtc)->pipe,
			     "encoder active on the wrong pipe\n");
		}
	}
}

/* Allocate and attach a default connector state. Returns 0 or -ENOMEM. */
int intel_connector_init(struct intel_connector *connector)
{
	struct drm_connector_state *connector_state;

	connector_state = kzalloc(sizeof *connector_state, GFP_KERNEL);
	if (!connector_state)
		return -ENOMEM;

	connector->base.state = connector_state;
	return 0;
}

/* Allocate an intel_connector with initialized state; NULL on OOM. */
struct intel_connector *intel_connector_alloc(void)
{
	struct intel_connector *connector;

	connector = kzalloc(sizeof *connector, GFP_KERNEL);
	if (!connector)
		return NULL;

	if (intel_connector_init(connector) < 0) {
		kfree(connector);
		return NULL;
	}

	return connector;
}

/* Even simpler default implementation, if there's really no special case to
 * consider. */
void intel_connector_dpms(struct drm_connector *connector, int mode)
{
	/* All the simple cases only support two dpms states. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (connector->encoder)
		intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);

	intel_modeset_check_state(connector->dev);
}

/* Simple connector->get_hw_state implementation for encoders that support only
 * one connector and no cloning and hence the encoder state determines the state
 * of the connector. */
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
	enum i915_pipe pipe = 0;
	struct intel_encoder *encoder = connector->encoder;

	return encoder->get_hw_state(encoder, &pipe);
}

/* How many FDI lanes does the given pipe currently need? 0 when unused. */
static int pipe_required_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));

	if (crtc->base.state->enable &&
	    crtc->config->has_pch_encoder)
		return crtc->config->fdi_lanes;

	return 0;
}

/*
 * Check whether the requested FDI lane count is possible on this pipe,
 * accounting for per-platform limits and the lane sharing between pipes
 * B and C on three-pipe Ivybridge.
 */
static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum i915_pipe pipe,
				     struct intel_crtc_state *pipe_config)
{
	DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
		      pipe_name(pipe), pipe_config->fdi_lanes);
	if (pipe_config->fdi_lanes > 4) {
		DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
			      pipe_name(pipe), pipe_config->fdi_lanes);
		return false;
	}

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
				      pipe_config->fdi_lanes);
			return false;
		} else {
			return true;
		}
	}

	if (INTEL_INFO(dev)->num_pipes == 2)
		return true;

	/* Ivybridge 3 pipe is really complicated */
	switch (pipe) {
	case PIPE_A:
		return true;
	case PIPE_B:
		if (pipe_config->fdi_lanes > 2 &&
		    pipe_required_fdi_lanes(dev, PIPE_C) > 0) {
			DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		return true;
	case PIPE_C:
		if (pipe_config->fdi_lanes > 2) {
			DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
				      pipe_name(pipe), pipe_config->fdi_lanes);
			return false;
		}
		if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) {
			DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
			return false;
		}
		return true;
	default:
		BUG();
	}
}

#define RETRY 1
/*
 * Compute FDI lane count and link M/N values for a PCH-attached pipe,
 * reducing pipe bpp (and returning RETRY so the caller recomputes) when
 * the FDI link cannot carry the mode at the current bpp.
 */
static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	int lane, link_bw, fdi_dotclock;
	bool setup_ok, needs_recompute = false;

retry:
	/* FDI is a binary signal running at ~2.7GHz, encoding
	 * each output octet as 10 bits. The actual frequency
	 * is stored as a divider into a 100MHz clock, and the
	 * mode pixel clock is stored in units of 1KHz.
	 * Hence the bw of each lane in terms of the mode signal
	 * is:
	 */
	link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;

	fdi_dotclock = adjusted_mode->crtc_clock;

	lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
					   pipe_config->pipe_bpp);

	pipe_config->fdi_lanes = lane;

	intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
			       link_bw, &pipe_config->fdi_m_n);

	setup_ok = ironlake_check_fdi_lanes(intel_crtc->base.dev,
					    intel_crtc->pipe, pipe_config);
	if (!setup_ok && pipe_config->pipe_bpp > 6*3) {
		/* Drop one bpc (3 bits total) and try the link math again. */
		pipe_config->pipe_bpp -= 2*3;
		DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
			      pipe_config->pipe_bpp);
		needs_recompute = true;
		pipe_config->bw_constrained = true;

		goto retry;
	}

	if (needs_recompute)
		return RETRY;

	return setup_ok ? 0 : -EINVAL;
}

/* IPS requires platform support and a pipe running at <= 24bpp. */
static void hsw_compute_ips_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	pipe_config->ips_enabled = i915.enable_ips &&
				   hsw_crtc_supports_ips(crtc) &&
				   pipe_config->pipe_bpp <= 24;
}

/*
 * Validate and fix up a candidate pipe configuration against platform
 * limits: pixel clock / double-wide, horizontal size parity, hsync front
 * porch and maximum pipe bpp. Returns 0, RETRY or a negative error.
 */
static int intel_crtc_compute_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	/* FIXME should check pixel clock limits on all platforms */
	if (INTEL_INFO(dev)->gen < 4) {
		int clock_limit =
			dev_priv->display.get_display_clock_speed(dev);

		/*
		 * Enable pixel doubling when the dot clock
		 * is > 90% of the (display) core speed.
		 *
		 * GDG double wide on either pipe,
		 * otherwise pipe A only.
		 */
		if ((crtc->pipe == PIPE_A || IS_I915G(dev)) &&
		    adjusted_mode->crtc_clock > clock_limit * 9 / 10) {
			clock_limit *= 2;
			pipe_config->double_wide = true;
		}

		if (adjusted_mode->crtc_clock > clock_limit * 9 / 10)
			return -EINVAL;
	}

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
	     intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
		pipe_config->pipe_src_w &= ~1;

	/* Cantiga+ cannot handle modes with a hsync front porch of 0.
	 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
	 */
	if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
		adjusted_mode->hsync_start == adjusted_mode->hdisplay)
		return -EINVAL;

	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && pipe_config->pipe_bpp > 10*3) {
		pipe_config->pipe_bpp = 10*3; /* 12bpc is gen5+ */
	} else if (INTEL_INFO(dev)->gen <= 4 && pipe_config->pipe_bpp > 8*3) {
		/* only a 8bpc pipe, with 6bpc dither through the panel fitter
		 * for lvds.
		 */
		pipe_config->pipe_bpp = 8*3;
	}

	if (HAS_IPS(dev))
		hsw_compute_ips_config(crtc, pipe_config);

	if (pipe_config->has_pch_encoder)
		return ironlake_fdi_compute_config(crtc, pipe_config);

	return 0;
}

/* Read the current VLV CDCLK (kHz) from the CCK display clock divider. */
static int valleyview_get_display_clock_speed(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;
	int divider;

	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	mutex_lock(&dev_priv->dpio_lock);
	val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
	mutex_unlock(&dev_priv->dpio_lock);

	divider = val & DISPLAY_FREQUENCY_VALUES;

	/* The status field should mirror the programmed divider when stable. */
	WARN((val & DISPLAY_FREQUENCY_STATUS) !=
	     (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
	     "cdclk change in progress\n");

	return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
}

/* Fixed display core clocks (kHz) for older platforms. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}

static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333000;
}

static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}

/* Decode the display core clock from the PNV GCFGC PCI config register. */
static int pnv_get_display_clock_speed(struct drm_device *dev)
{
	u16 gcfgc = 0;

	pci_read_config_word(dev->pdev, GCFGC, &gcfgc);

	switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
	case GC_DISPLAY_CLOCK_267_MHZ_PNV:
		return 267000;
	case GC_DISPLAY_CLOCK_333_MHZ_PNV:
		return 333000;
	case GC_DISPLAY_CLOCK_444_MHZ_PNV:
		return 444000;
	case GC_DISPLAY_CLOCK_200_MHZ_PNV:
		return 200000;
	default:
		DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
		/* fall through - report the slowest known speed */
	case GC_DISPLAY_CLOCK_133_MHZ_PNV:
		return 133000;
	case GC_DISPLAY_CLOCK_167_MHZ_PNV:
		return 167000;
	}
}

/* Decode the i915GM display core clock from the GCFGC config register. */
static int i915gm_get_display_clock_speed(struct
drm_device *dev) 5931 { 5932 u16 gcfgc = 0; 5933 5934 pci_read_config_word(dev->pdev, GCFGC, &gcfgc); 5935 5936 if (gcfgc & GC_LOW_FREQUENCY_ENABLE) 5937 return 133000; 5938 else { 5939 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { 5940 case GC_DISPLAY_CLOCK_333_MHZ: 5941 return 333000; 5942 default: 5943 case GC_DISPLAY_CLOCK_190_200_MHZ: 5944 return 190000; 5945 } 5946 } 5947 } 5948 5949 static int i865_get_display_clock_speed(struct drm_device *dev) 5950 { 5951 return 266000; 5952 } 5953 5954 static int i855_get_display_clock_speed(struct drm_device *dev) 5955 { 5956 u16 hpllcc = 0; 5957 /* Assume that the hardware is in the high speed state. This 5958 * should be the default. 5959 */ 5960 switch (hpllcc & GC_CLOCK_CONTROL_MASK) { 5961 case GC_CLOCK_133_200: 5962 case GC_CLOCK_100_200: 5963 return 200000; 5964 case GC_CLOCK_166_250: 5965 return 250000; 5966 case GC_CLOCK_100_133: 5967 return 133000; 5968 } 5969 5970 /* Shouldn't happen */ 5971 return 0; 5972 } 5973 5974 static int i830_get_display_clock_speed(struct drm_device *dev) 5975 { 5976 return 133000; 5977 } 5978 5979 static void 5980 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) 5981 { 5982 while (*num > DATA_LINK_M_N_MASK || 5983 *den > DATA_LINK_M_N_MASK) { 5984 *num >>= 1; 5985 *den >>= 1; 5986 } 5987 } 5988 5989 static void compute_m_n(unsigned int m, unsigned int n, 5990 uint32_t *ret_m, uint32_t *ret_n) 5991 { 5992 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); 5993 *ret_m = div_u64((uint64_t) m * *ret_n, n); 5994 intel_reduce_m_n_ratio(ret_m, ret_n); 5995 } 5996 5997 void 5998 intel_link_compute_m_n(int bits_per_pixel, int nlanes, 5999 int pixel_clock, int link_clock, 6000 struct intel_link_m_n *m_n) 6001 { 6002 m_n->tu = 64; 6003 6004 compute_m_n(bits_per_pixel * pixel_clock, 6005 link_clock * nlanes * 8, 6006 &m_n->gmch_m, &m_n->gmch_n); 6007 6008 compute_m_n(pixel_clock, link_clock, 6009 &m_n->link_m, &m_n->link_n); 6010 } 6011 6012 static inline bool 
intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
	/* Module parameter overrides VBT; a negative value means "use VBT". */
	if (i915.panel_use_ssc >= 0)
		return i915.panel_use_ssc != 0;
	return dev_priv->vbt.lvds_use_ssc
		&& !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
}

/* Pick the DPLL reference clock (kHz) for a gmch-platform pipe. */
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
			   int num_connectors)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;

	WARN_ON(!crtc_state->base.state);

	if (IS_VALLEYVIEW(dev)) {
		refclk = 100000;
	} else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		refclk = dev_priv->vbt.lvds_ssc_freq;
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
	} else if (!IS_GEN2(dev)) {
		refclk = 96000;
	} else {
		refclk = 48000;
	}

	return refclk;
}

/* Pack N/M2 into the Pineview FP register layout (N is stored as 2^n). */
static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
{
	return (1 << dpll->n) << 16 | dpll->m2;
}

/* Pack N/M1/M2 into the classic i9xx FP register layout. */
static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
{
	return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
}

/*
 * Compute FP0/FP1 divider values; FP1 carries the reduced (downclocked)
 * LVDS dividers when available, otherwise it mirrors FP0.
 */
static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
				     struct intel_crtc_state *crtc_state,
				     intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	u32 fp, fp2 = 0;

	if (IS_PINEVIEW(dev)) {
		fp = pnv_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = pnv_dpll_compute_fp(reduced_clock);
	} else {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (reduced_clock)
			fp2 = i9xx_dpll_compute_fp(reduced_clock);
	}

	crtc_state->dpll_hw_state.fp0 = fp;

	crtc->lowfreq_avail = false;
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    reduced_clock) {
		crtc_state->dpll_hw_state.fp1 = fp2;
		crtc->lowfreq_avail = true;
	}
	else {
		crtc_state->dpll_hw_state.fp1 = fp;
	}
}

/*
 * DPIO workaround for the VLV PLLB opamp, which always calibrates to
 * the max value of 0x3f; force enable it and program a sane value.
 */
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum i915_pipe
		pipe)
{
	u32 reg_val;

	/*
	 * PLLB opamp always calibrates to max value of 0x3f, force enable it
	 * and set it to a reasonable value instead.
	 */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	reg_val |= 0x00000030;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x8cffffff;
	/*
	 * NOTE(review): plain '=' discards the value read/masked just above;
	 * looks deliberate (write a fixed 0x8c000000) but '|=' would be the
	 * usual pattern — confirm against the DPIO programming notes.
	 */
	reg_val = 0x8c000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
	reg_val &= 0xffffff00;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);

	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
	reg_val &= 0x00ffffff;
	reg_val |= 0xb0000000;
	vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}

/* Program data/link M/N values into the PCH transcoder registers. */
static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
	I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
	I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
	I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}

/*
 * Program data/link M/N (and, where the hardware has them and DRRS is in
 * use, M2/N2) values into the CPU transcoder registers.
 */
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	enum transcoder transcoder = crtc->config->cpu_transcoder;

	if (INTEL_INFO(dev)->gen >= 5) {
		I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) |
			   m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
		I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
		/* M2_N2 registers to be set only for gen < 8 (M2_N2 available
		 * for gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily accessed).
		 */
		if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
			crtc->config->has_drrs) {
			I915_WRITE(PIPE_DATA_M2(transcoder),
					TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
			I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
			I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
			I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
		}
	} else {
		I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
		I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
		I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
		I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
	}
}

/*
 * Select and program the DP M/N divider set: M1_N1, or the M2_N2 values
 * folded into the M1_N1 registers when the hardware lacks M2_N2.
 */
void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
{
	struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;

	if (m_n == M1_N1) {
		dp_m_n = &crtc->config->dp_m_n;
		dp_m2_n2 = &crtc->config->dp_m2_n2;
	} else if (m_n == M2_N2) {

		/*
		 * M2_N2 registers are not supported. Hence m2_n2 divider value
		 * needs to be programmed into M1_N1.
		 */
		dp_m_n = &crtc->config->dp_m2_n2;
	} else {
		DRM_ERROR("Unsupported divider value\n");
		return;
	}

	/*
	 * NOTE(review): the PCH path always programs dp_m_n from the config,
	 * ignoring the dp_m_n pointer selected above — harmless only as long
	 * as M2_N2 is never requested on PCH platforms; confirm.
	 */
	if (crtc->config->has_pch_encoder)
		intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
	else
		intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
}

/* Compute the VLV DPLL control/MD register values into the pipe config. */
static void vlv_update_pll(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	u32 dpll, dpll_md;

	/*
	 * Enable DPIO clock input.
	 * We should never disable the reference
	 * clock for pipe B, since VGA hotplug / manual detection depends
	 * on it.
	 */
	dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
		DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
	/* We should never disable this, set it here for state tracking */
	if (crtc->pipe == PIPE_B)
		dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
	dpll |= DPLL_VCO_ENABLE;
	pipe_config->dpll_hw_state.dpll = dpll;

	dpll_md = (pipe_config->pixel_multiplier - 1)
		<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
	pipe_config->dpll_hw_state.dpll_md = dpll_md;
}

/*
 * Program the VLV DPIO PHY/PLL dividers and analog settings for the pipe
 * ahead of enabling the PLL; magic values follow the eDP/HDMI DPIO vbios
 * notes referenced below.
 */
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	u32 mdiv;
	u32 bestn, bestm1, bestm2, bestp1, bestp2;
	u32 coreclk, reg_val;

	mutex_lock(&dev_priv->dpio_lock);

	bestn = pipe_config->dpll.n;
	bestm1 = pipe_config->dpll.m1;
	bestm2 = pipe_config->dpll.m2;
	bestp1 = pipe_config->dpll.p1;
	bestp2 = pipe_config->dpll.p2;

	/* See eDP HDMI DPIO driver vbios notes doc */

	/* PLL B needs special handling */
	if (pipe == PIPE_B)
		vlv_pllb_recal_opamp(dev_priv, pipe);

	/* Set up Tx target for periodic Rcomp update */
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);

	/* Disable target IRef on PLL */
	reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
	reg_val &= 0x00ffffff;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);

	/* Disable fast lock */
	vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);

	/* Set idtafcrecal before PLL is enabled */
	mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
	mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
	mdiv |= ((bestn << DPIO_N_SHIFT));
	mdiv |= (1 << DPIO_K_SHIFT);

	/*
	 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
	 * but we don't support that).
	 * Note: don't use the DAC post divider as it seems unstable.
	 */
	mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	mdiv |= DPIO_ENABLE_CALIBRATION;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);

	/* Set HBR and RBR LPF coefficients */
	if (pipe_config->port_clock == 162000 ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x009f0003);
	else
		vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
				 0x00d0000f);

	if (pipe_config->has_dp_encoder) {
		/* Use SSC source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
	} else { /* HDMI or VGA */
		/* Use bend source */
		if (pipe == PIPE_A)
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df70000);
		else
			vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
					 0x0df40000);
	}

	coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
	coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
		coreclk |= 0x01000000;
	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);

	vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
	mutex_unlock(&dev_priv->dpio_lock);
}

/* Compute the CHV DPLL control/MD register values into the pipe config. */
static void chv_update_pll(struct intel_crtc *crtc,
			   struct intel_crtc_state *pipe_config)
{
	pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
		DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | 6303 DPLL_VCO_ENABLE; 6304 if (crtc->pipe != PIPE_A) 6305 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6306 6307 pipe_config->dpll_hw_state.dpll_md = 6308 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6309 } 6310 6311 static void chv_prepare_pll(struct intel_crtc *crtc, 6312 const struct intel_crtc_state *pipe_config) 6313 { 6314 struct drm_device *dev = crtc->base.dev; 6315 struct drm_i915_private *dev_priv = dev->dev_private; 6316 int pipe = crtc->pipe; 6317 int dpll_reg = DPLL(crtc->pipe); 6318 enum dpio_channel port = vlv_pipe_to_channel(pipe); 6319 u32 loopfilter, tribuf_calcntr; 6320 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 6321 u32 dpio_val; 6322 int vco; 6323 6324 bestn = pipe_config->dpll.n; 6325 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; 6326 bestm1 = pipe_config->dpll.m1; 6327 bestm2 = pipe_config->dpll.m2 >> 22; 6328 bestp1 = pipe_config->dpll.p1; 6329 bestp2 = pipe_config->dpll.p2; 6330 vco = pipe_config->dpll.vco; 6331 dpio_val = 0; 6332 loopfilter = 0; 6333 6334 /* 6335 * Enable Refclk and SSC 6336 */ 6337 I915_WRITE(dpll_reg, 6338 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 6339 6340 mutex_lock(&dev_priv->dpio_lock); 6341 6342 /* p1 and p2 divider */ 6343 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port), 6344 5 << DPIO_CHV_S1_DIV_SHIFT | 6345 bestp1 << DPIO_CHV_P1_DIV_SHIFT | 6346 bestp2 << DPIO_CHV_P2_DIV_SHIFT | 6347 1 << DPIO_CHV_K_DIV_SHIFT); 6348 6349 /* Feedback post-divider - m2 */ 6350 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2); 6351 6352 /* Feedback refclk divider - n and m1 */ 6353 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port), 6354 DPIO_CHV_M1_DIV_BY_2 | 6355 1 << DPIO_CHV_N_DIV_SHIFT); 6356 6357 /* M2 fraction division */ 6358 if (bestm2_frac) 6359 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); 6360 6361 /* M2 fraction division enable */ 6362 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); 6363 
	/* (continuation of chv_prepare_pll) Set feed-forward gain and enable
	 * fractional division only when a fractional m2 is in use. */
	dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
	dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
	if (bestm2_frac)
		dpio_val |= DPIO_CHV_FRAC_DIV_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);

	/* Program digital lock detect threshold */
	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
	dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
					DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
	dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
	/* Coarse lock detect only when no fractional divider is active */
	if (!bestm2_frac)
		dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);

	/* Loop filter: coefficients and tri-buffer count depend on the VCO
	 * frequency band. */
	if (vco == 5400000) {
		loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6200000) {
		loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x9;
	} else if (vco <= 6480000) {
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0x8;
	} else {
		/* Not supported. Apply the same limits as in the max case */
		loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
		loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
		loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
		tribuf_calcntr = 0;
	}
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);

	dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
	dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
	dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
	vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);

	/* AFC Recal */
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
			vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
			DPIO_AFC_RECAL);

	mutex_unlock(&dev_priv->dpio_lock);
}

/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 */
void vlv_force_pll_on(struct drm_device *dev, enum i915_pipe pipe,
		      const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	/* Minimal fake crtc state carrying just the PLL dividers */
	struct intel_crtc_state pipe_config = {
		.base.crtc = &crtc->base,
		.pixel_multiplier = 1,
		.dpll = *dpll,
	};

	if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, &pipe_config);
		chv_prepare_pll(crtc, &pipe_config);
		chv_enable_pll(crtc, &pipe_config);
	} else {
		vlv_update_pll(crtc, &pipe_config);
		vlv_prepare_pll(crtc, &pipe_config);
		vlv_enable_pll(crtc, &pipe_config);
	}
}

/**
 * vlv_force_pll_off - forcibly disable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to disable
 *
 * Disable the PLL for @pipe. Counterpart to vlv_force_pll_on(); to be
 * used in cases where the PLL was enabled without @pipe itself being
 * enabled.
 */
void vlv_force_pll_off(struct drm_device *dev, enum i915_pipe pipe)
{
	if (IS_CHERRYVIEW(dev))
		chv_disable_pll(to_i915(dev), pipe);
	else
		vlv_disable_pll(to_i915(dev), pipe);
}

/*
 * Compute the gen4+/i9xx DPLL register values for @crtc_state (software
 * state only — the hardware write happens at enable time).
 * @reduced_clock: optional LVDS downclock dividers, may be NULL.
 * @num_connectors: used to decide whether SSC refclk can be selected.
 */
static void i9xx_update_pll(struct intel_crtc *crtc,
			    struct intel_crtc_state *crtc_state,
			    intel_clock_t *reduced_clock,
			    int num_connectors)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |=
DPLLB_LVDS_P2_CLOCK_DIV_14; 6518 break; 6519 } 6520 if (INTEL_INFO(dev)->gen >= 4) 6521 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 6522 6523 if (crtc_state->sdvo_tv_clock) 6524 dpll |= PLL_REF_INPUT_TVCLKINBC; 6525 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 6526 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6527 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6528 else 6529 dpll |= PLL_REF_INPUT_DREFCLK; 6530 6531 dpll |= DPLL_VCO_ENABLE; 6532 crtc_state->dpll_hw_state.dpll = dpll; 6533 6534 if (INTEL_INFO(dev)->gen >= 4) { 6535 u32 dpll_md = (crtc_state->pixel_multiplier - 1) 6536 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6537 crtc_state->dpll_hw_state.dpll_md = dpll_md; 6538 } 6539 } 6540 6541 static void i8xx_update_pll(struct intel_crtc *crtc, 6542 struct intel_crtc_state *crtc_state, 6543 intel_clock_t *reduced_clock, 6544 int num_connectors) 6545 { 6546 struct drm_device *dev = crtc->base.dev; 6547 struct drm_i915_private *dev_priv = dev->dev_private; 6548 u32 dpll; 6549 struct dpll *clock = &crtc_state->dpll; 6550 6551 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 6552 6553 dpll = DPLL_VGA_MODE_DIS; 6554 6555 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 6556 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6557 } else { 6558 if (clock->p1 == 2) 6559 dpll |= PLL_P1_DIVIDE_BY_TWO; 6560 else 6561 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6562 if (clock->p2 == 4) 6563 dpll |= PLL_P2_DIVIDE_BY_4; 6564 } 6565 6566 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 6567 dpll |= DPLL_DVO_2X_MODE; 6568 6569 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 6570 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6571 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6572 else 6573 dpll |= PLL_REF_INPUT_DREFCLK; 6574 6575 dpll |= DPLL_VCO_ENABLE; 6576 crtc_state->dpll_hw_state.dpll = dpll; 6577 } 6578 6579 static void intel_set_pipe_timings(struct intel_crtc 
					/* Program the pipe/transcoder timing registers (H/V total,
					 * blank, sync, pipesrc) from the crtc's adjusted mode. */
					*intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode =
		&intel_crtc->config->base.adjusted_mode;
	uint32_t crtc_vtotal, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	if (INTEL_INFO(dev)->gen > 3)
		I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);

	/* Timing registers pack (start - 1) in the low word and
	 * (end - 1) in the high word. */
	I915_WRITE(HTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_hdisplay - 1) |
		   ((adjusted_mode->crtc_htotal - 1) << 16));
	I915_WRITE(HBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_hblank_start - 1) |
		   ((adjusted_mode->crtc_hblank_end - 1) << 16));
	I915_WRITE(HSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_hsync_start - 1) |
		   ((adjusted_mode->crtc_hsync_end - 1) << 16));

	I915_WRITE(VTOTAL(cpu_transcoder),
		   (adjusted_mode->crtc_vdisplay - 1) |
		   ((crtc_vtotal - 1) << 16));
	I915_WRITE(VBLANK(cpu_transcoder),
		   (adjusted_mode->crtc_vblank_start - 1) |
		   ((crtc_vblank_end - 1) << 16));
	I915_WRITE(VSYNC(cpu_transcoder),
		   (adjusted_mode->crtc_vsync_start - 1) |
		   ((adjusted_mode->crtc_vsync_end - 1) << 16));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	I915_WRITE(PIPESRC(pipe),
		   ((intel_crtc->config->pipe_src_w - 1) << 16) |
		   (intel_crtc->config->pipe_src_h - 1));
}

/*
 * Read the pipe/transcoder timing registers back into @pipe_config.
 * Inverse of intel_set_pipe_timings(): each field decodes a packed
 * (value - 1) 16-bit half-word.
 */
static void intel_get_pipe_timings(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	uint32_t tmp;

	tmp = I915_READ(HTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(HSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;

	tmp = I915_READ(VTOTAL(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VBLANK(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
	tmp = I915_READ(VSYNC(cpu_transcoder));
	pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
	pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;

	if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
		/* undo the "chip adds 2 halflines" adjustment done on write */
		pipe_config->base.adjusted_mode.crtc_vtotal += 1;
		pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
	}

	tmp = I915_READ(PIPESRC(crtc->pipe));
	pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
	pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;

	pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
	pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
}

/*
 * Fill a drm_display_mode from the crtc_* timing fields of
 * @pipe_config->base.adjusted_mode (the hardware-readout values).
 */
void intel_mode_from_pipe_config(struct drm_display_mode *mode,
				 struct intel_crtc_state *pipe_config)
{
	mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
	mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
	mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
	mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;

	mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
	mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
	mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
	mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;

	mode->flags = pipe_config->base.adjusted_mode.flags;

	mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
	/* NOTE(review): this |= is redundant — flags was already assigned
	 * the same value above; harmless, but could be dropped. */
	mode->flags |= pipe_config->base.adjusted_mode.flags;
}

/*
 * Compute and write the PIPECONF register for gen2-4/VLV pipes:
 * enable-state quirks, double-wide, bpc/dither, CxSR downclock,
 * interlace mode and color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* Keep the pipe force-enabled on quirky machines that hang if it is
	 * ever turned off. */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3- and SDVO need the field-indication interlace mode */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if (IS_VALLEYVIEW(dev) && intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}

/*
 * Compute PLL dividers/state for a gen2-4/VLV/CHV crtc from the atomic
 * state in @crtc_state. Scans the connector states to classify outputs
 * (LVDS needs downclock handling, DSI bypasses the DPLL entirely), finds
 * divider values unless userspace already fixed them, then delegates to
 * the platform-specific *_update_pll helper.
 * Returns 0 on success, -EINVAL if no divider solution exists.
 */
static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
				   struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk, num_connectors = 0;
	intel_clock_t clock, reduced_clock;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false, is_dsi = false;
	struct intel_encoder *encoder;
	const intel_limit_t *limit;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector_state *connector_state;
	int i;

	/* Classify the encoders feeding this crtc from the atomic state */
	for (i = 0; i < state->num_connector; i++) {
		if (!state->connectors[i])
			continue;

		connector_state = state->connector_states[i];
		if (connector_state->crtc != &crtc->base)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_DSI:
			is_dsi = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* DSI uses its own PLL, nothing to compute here */
	if (is_dsi)
		return 0;

	if (!crtc_state->clock_set) {
		refclk = i9xx_get_refclk(crtc_state, num_connectors);

		/*
		 * Returns a set of divisors for the desired target clock with
		 * the given refclk, or FALSE. The returned values represent
		 * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n +
		 * 2) / p1 / p2.
		 */
		limit = intel_limit(crtc_state, refclk);
		ok = dev_priv->display.find_dpll(limit, crtc_state,
						 crtc_state->port_clock,
						 refclk, NULL, &clock);
		if (!ok) {
			DRM_ERROR("Couldn't find PLL settings for mode!\n");
			return -EINVAL;
		}

		if (is_lvds && dev_priv->lvds_downclock_avail) {
			/*
			 * Ensure we match the reduced clock's P to the target
			 * clock. If the clocks don't match, we can't switch
			 * the display clock by using the FP0/FP1. In such case
			 * we will disable the LVDS downclock feature.
			 */
			has_reduced_clock =
				dev_priv->display.find_dpll(limit, crtc_state,
							    dev_priv->lvds_downclock,
							    refclk, &clock,
							    &reduced_clock);
		}
		/* Compat-code for transition, will disappear. */
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	if (IS_GEN2(dev)) {
		i8xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	} else if (IS_CHERRYVIEW(dev)) {
		chv_update_pll(crtc, crtc_state);
	} else if (IS_VALLEYVIEW(dev)) {
		vlv_update_pll(crtc, crtc_state);
	} else {
		i9xx_update_pll(crtc, crtc_state,
				has_reduced_clock ? &reduced_clock : NULL,
				num_connectors);
	}

	return 0;
}

/*
 * Read back the panel-fitter (pfit) hardware state for gen2-4 into
 * @pipe_config->gmch_pfit, if the fitter is enabled and attached to
 * this crtc's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	/* Only mobile gen2/3 parts (minus I830) have this pfit */
	if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
		return;

	tmp = I915_READ(PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe.
	 */
	if (INTEL_INFO(dev)->gen < 4) {
		/* gen2/3: the single pfit is hardwired to pipe B */
		if (crtc->pipe != PIPE_B)
			return;
	} else {
		if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
			return;
	}

	pipe_config->gmch_pfit.control = tmp;
	pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
	if (INTEL_INFO(dev)->gen < 5)
		pipe_config->gmch_pfit.lvds_border_bits =
			I915_READ(LVDS) & LVDS_BORDER_ENABLE;
}

/*
 * Read the VLV PLL dividers back over DPIO and compute the resulting
 * port clock into @pipe_config->port_clock.
 */
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	intel_clock_t clock;
	u32 mdiv;
	int refclk = 100000;	/* VLV display PHY refclk is 100 MHz */

	/* In case of MIPI DPLL will not even be used */
	if (!(pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE))
		return;

	mutex_lock(&dev_priv->dpio_lock);
	mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack the divider fields written by vlv_prepare_pll() */
	clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
	clock.m2 = mdiv & DPIO_M2DIV_MASK;
	clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
	clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
	clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;

	vlv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Reconstruct the framebuffer the BIOS/firmware left active on a gen2-4
 * primary plane, so the boot image can be inherited. Fills
 * @plane_config (base address, size, fb) from the plane registers; bails
 * out silently if the plane is disabled or allocation fails.
 */
static void
i9xx_get_initial_plane_config(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe, plane = crtc->plane;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(plane));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base: gen4+ splits base and tile/linear offset; gen2/3
	 * have a single address register. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(plane));
		else
			offset = I915_READ(DSPLINOFF(plane));
		base = I915_READ(DSPSURF(plane)) & 0xfffff000;
	} else {
		base = I915_READ(DSPADDR(plane));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

	DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), plane, fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read the CHV PLL dividers back over DPIO and compute the resulting
 * port clock into @pipe_config->port_clock (CHV counterpart of
 * vlv_crtc_clock_get()).
 */
static void chv_crtc_clock_get(struct intel_crtc *crtc,
			       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	intel_clock_t clock;
	u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2;
	int refclk = 100000;	/* CHV display PHY refclk is 100 MHz */

	mutex_lock(&dev_priv->dpio_lock);
	cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
	pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
	pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
	pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
	mutex_unlock(&dev_priv->dpio_lock);

	/* Unpack dividers as written by chv_prepare_pll(); m2 recombines
	 * the integer (DW0) and fractional (DW2) parts. */
	clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
	clock.m2 = ((pll_dw0 & 0xff) << 22) | (pll_dw2 & 0x3fffff);
	clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
	clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
	clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;

	chv_clock(refclk, &clock);

	/* clock.dot is the fast clock */
	pipe_config->port_clock = clock.dot / 5;
}

/*
 * Read the full gen2-4/VLV/CHV pipe hardware state into @pipe_config.
 * Returns false when the pipe's power domain is off or the pipe is
 * disabled, true when @pipe_config was filled from the hardware.
 */
static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
				 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		switch (tmp & PIPECONF_BPC_MASK) {
		case PIPECONF_6BPC:
			pipe_config->pipe_bpp = 18;
			break;
		case PIPECONF_8BPC:
			pipe_config->pipe_bpp = 24;
			break;
		case PIPECONF_10BPC:
			pipe_config->pipe_bpp = 30;
			break;
		default:
			break;
		}
	}

	if (IS_VALLEYVIEW(dev) && (tmp & PIPECONF_COLOR_RANGE_SELECT))
		pipe_config->limited_color_range = true;

	if (INTEL_INFO(dev)->gen < 4)
		pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;

	intel_get_pipe_timings(crtc, pipe_config);

	i9xx_get_pfit_config(crtc, pipe_config);

	if (INTEL_INFO(dev)->gen >= 4) {
		tmp = I915_READ(DPLL_MD(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
			 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
		pipe_config->dpll_hw_state.dpll_md = tmp;
	} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		tmp = I915_READ(DPLL(crtc->pipe));
		pipe_config->pixel_multiplier =
			((tmp & SDVO_MULTIPLIER_MASK)
			 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
	} else {
		/* Note that on i915G/GM the pixel multiplier is in the sdvo
		 * port and will be fixed up in the encoder->get_config
		 * function. */
		pipe_config->pixel_multiplier = 1;
	}
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
	if (!IS_VALLEYVIEW(dev)) {
		/*
		 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
		 * on 830. Filter it out here so that we don't
		 * report errors due to that.
		 */
		if (IS_I830(dev))
			pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;

		pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
		pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
	} else {
		/* Mask out read-only status bits. */
		pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
						     DPLL_PORTC_READY_MASK |
						     DPLL_PORTB_READY_MASK);
	}

	if (IS_CHERRYVIEW(dev))
		chv_crtc_clock_get(crtc, pipe_config);
	else if (IS_VALLEYVIEW(dev))
		vlv_crtc_clock_get(crtc, pipe_config);
	else
		i9xx_crtc_clock_get(crtc, pipe_config);

	return true;
}

/*
 * Configure the Ironlake/CPT PCH display reference clock (PCH_DREF_CONTROL):
 * decide the final SSC/non-spread/CK505 source configuration from the
 * attached encoders, then walk the hardware to that state one source at a
 * time (each step needs a posting read plus a 200us settle delay).
 */
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	u32 val, final;
	bool has_lvds = false;
	bool has_cpu_edp = false;
	bool has_panel = false;
	bool has_ck505 = false;
	bool can_ssc = false;

	/* We need to take the global config into account */
	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			has_panel = true;
			has_lvds = true;
			break;
		case INTEL_OUTPUT_EDP:
			has_panel = true;
			if (enc_to_dig_port(&encoder->base)->port == PORT_A)
				has_cpu_edp = true;
			break;
		default:
			break;
		}
	}

	if (HAS_PCH_IBX(dev)) {
		has_ck505 = dev_priv->vbt.display_clock_mode;
		can_ssc = has_ck505;
	} else {
		has_ck505 = false;
		can_ssc = true;
	}

	DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
		      has_panel, has_lvds, has_ck505);

	/* Ironlake: try to setup display ref clock before DPLL
	 * enabling. This is only under driver's control after
	 * PCH B stepping, previous chipset stepping should be
	 * ignoring this setting.
	 */
	val = I915_READ(PCH_DREF_CONTROL);

	/* As we must carefully and slowly disable/enable each source in turn,
	 * compute the final state we want first and check if we need to
	 * make any changes at all.
	 */
	final = val;
	final &= ~DREF_NONSPREAD_SOURCE_MASK;
	if (has_ck505)
		final |= DREF_NONSPREAD_CK505_ENABLE;
	else
		final |= DREF_NONSPREAD_SOURCE_ENABLE;

	final &= ~DREF_SSC_SOURCE_MASK;
	final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
	final &= ~DREF_SSC1_ENABLE;

	if (has_panel) {
		final |= DREF_SSC_SOURCE_ENABLE;

		if (intel_panel_use_ssc(dev_priv) && can_ssc)
			final |= DREF_SSC1_ENABLE;

		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc)
				final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			else
				final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	} else {
		final |= DREF_SSC_SOURCE_DISABLE;
		final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
	}

	/* Hardware already in the desired state — nothing to do */
	if (final == val)
		return;

	/* Always enable nonspread source */
	val &= ~DREF_NONSPREAD_SOURCE_MASK;

	if (has_ck505)
		val |= DREF_NONSPREAD_CK505_ENABLE;
	else
		val |= DREF_NONSPREAD_SOURCE_ENABLE;

	if (has_panel) {
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_ENABLE;

		/* SSC must be turned on before enabling the CPU output */
		if (intel_panel_use_ssc(dev_priv) && can_ssc) {
			DRM_DEBUG_KMS("Using SSC on panel\n");
			val |= DREF_SSC1_ENABLE;
		} else
			val &= ~DREF_SSC1_ENABLE;

		/* Get SSC going before enabling the outputs */
		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Enable CPU source on CPU attached eDP */
		if (has_cpu_edp) {
			if (intel_panel_use_ssc(dev_priv) && can_ssc) {
				DRM_DEBUG_KMS("Using SSC on eDP\n");
				val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
			} else
				val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
		} else
			val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	} else {
		DRM_DEBUG_KMS("Disabling SSC entirely\n");

		val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;

		/* Turn off CPU output */
		val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);

		/* Turn off the SSC source */
		val &= ~DREF_SSC_SOURCE_MASK;
		val |= DREF_SSC_SOURCE_DISABLE;

		/* Turn off SSC1 */
		val &= ~DREF_SSC1_ENABLE;

		I915_WRITE(PCH_DREF_CONTROL, val);
		POSTING_READ(PCH_DREF_CONTROL);
		udelay(200);
	}

	/* Sanity: the incremental walk must have produced exactly the
	 * precomputed final state. */
	BUG_ON(val != final);
}

/*
 * Pulse the FDI mPHY reset over the south chicken register, waiting for
 * the assert/de-assert status bits (100us timeout each way).
 */
static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
			       FDI_MPHY_IOSFSB_RESET_STATUS, 100))
		DRM_ERROR("FDI mPHY reset assert timeout\n");

	tmp = I915_READ(SOUTH_CHICKEN2);
	tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
	I915_WRITE(SOUTH_CHICKEN2, tmp);

	if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
				FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
		DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}

/* WaMPhyProgramming:hsw
 * Program the FDI mPHY tuning registers over sideband; the raw register
 * offsets and values come from the workaround's programming table. */
static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
{
	uint32_t tmp;

	tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
	tmp &= ~(0xFF << 24);
	tmp |= (0x12 << 24);
	intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
	tmp |= (1 << 11);
	intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);

	tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
	tmp |= (1 << 24) | (1 << 21)
enable CLKOUT_DP without spread 7367 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O 7368 */ 7369 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, 7370 bool with_fdi) 7371 { 7372 struct drm_i915_private *dev_priv = dev->dev_private; 7373 uint32_t reg, tmp; 7374 7375 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 7376 with_spread = true; 7377 if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE && 7378 with_fdi, "LP PCH doesn't have FDI\n")) 7379 with_fdi = false; 7380 7381 mutex_lock(&dev_priv->dpio_lock); 7382 7383 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7384 tmp &= ~SBI_SSCCTL_DISABLE; 7385 tmp |= SBI_SSCCTL_PATHALT; 7386 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7387 7388 udelay(24); 7389 7390 if (with_spread) { 7391 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK); 7392 tmp &= ~SBI_SSCCTL_PATHALT; 7393 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK); 7394 7395 if (with_fdi) { 7396 lpt_reset_fdi_mphy(dev_priv); 7397 lpt_program_fdi_mphy(dev_priv); 7398 } 7399 } 7400 7401 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 7402 SBI_GEN0 : SBI_DBUFF0; 7403 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK); 7404 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE; 7405 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK); 7406 7407 mutex_unlock(&dev_priv->dpio_lock); 7408 } 7409 7410 /* Sequence to disable CLKOUT_DP */ 7411 static void lpt_disable_clkout_dp(struct drm_device *dev) 7412 { 7413 struct drm_i915_private *dev_priv = dev->dev_private; 7414 uint32_t reg, tmp; 7415 7416 mutex_lock(&dev_priv->dpio_lock); 7417 7418 reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ? 
	       SBI_GEN0 : SBI_DBUFF0;
	tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
	tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
	intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);

	tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
	if (!(tmp & SBI_SSCCTL_DISABLE)) {
		/* Route to the alternate path before shutting the SSC off. */
		if (!(tmp & SBI_SSCCTL_PATHALT)) {
			tmp |= SBI_SSCCTL_PATHALT;
			intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
			udelay(32);
		}
		tmp |= SBI_SSCCTL_DISABLE;
		intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
	}

	mutex_unlock(&dev_priv->dpio_lock);
}

/*
 * Enable CLKOUT_DP (with spread and FDI I/O programming) when an analog
 * (VGA) encoder is present, otherwise disable CLKOUT_DP entirely.
 */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	bool has_vga = false;

	for_each_intel_encoder(dev, encoder) {
		switch (encoder->type) {
		case INTEL_OUTPUT_ANALOG:
			has_vga = true;
			break;
		default:
			break;
		}
	}

	if (has_vga)
		lpt_enable_clkout_dp(dev, true, true);
	else
		lpt_disable_clkout_dp(dev);
}

/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}

/*
 * Return the reference clock in kHz to use for this CRTC: the VBT's
 * LVDS SSC frequency when a single LVDS connector with SSC enabled is
 * driven, 120000 kHz otherwise.
 */
static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int num_connectors = 0, i;
	bool is_lvds = false;

	/* Scan the atomic state for connectors assigned to this CRTC. */
	for (i = 0; i < state->num_connector; i++) {
		if (!state->connectors[i])
			continue;

		connector_state = state->connector_states[i];
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		default:
			break;
		}
		num_connectors++;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
		DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
			      dev_priv->vbt.lvds_ssc_freq);
		return dev_priv->vbt.lvds_ssc_freq;
	}

	return 120000;
}

/*
 * Program PIPECONF for an Ironlake-style pipe: bits-per-component,
 * dithering, interlace mode and limited/full RGB color range.
 */
static void ironlake_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t val;

	val = 0;

	switch (intel_crtc->config->pipe_bpp) {
	case 18:
		val |= PIPECONF_6BPC;
		break;
	case 24:
		val |= PIPECONF_8BPC;
		break;
	case 30:
		val |= PIPECONF_10BPC;
		break;
	case 36:
		val |= PIPECONF_12BPC;
		break;
	default:
		/* Case prevented by intel_choose_pipe_bpp_dither. */
		BUG();
	}

	if (intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	if (intel_crtc->config->limited_color_range)
		val |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(pipe), val);
	POSTING_READ(PIPECONF(pipe));
}

/*
 * Set up the pipe CSC unit.
 *
 * Currently only full range RGB to limited range RGB conversion
 * is supported, but eventually this should handle various
 * RGB<->YCbCr scenarios as well.
 */
static void intel_set_pipe_csc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint16_t coeff = 0x7800; /* 1.0 */

	/*
	 * TODO: Check what kind of values actually come out of the pipe
	 * with these coeff/postoff values and adjust to get the best
	 * accuracy. Perhaps we even need to take the bpc value into
	 * consideration.
	 */

	if (intel_crtc->config->limited_color_range)
		coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */

	/*
	 * GY/GU and RY/RU should be the other way around according
	 * to BSpec, but reality doesn't agree. Just set them up in
	 * a way that results in the correct picture.
	 */
	I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
	I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
	I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);

	I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
	I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);

	I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
	I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);

	if (INTEL_INFO(dev)->gen > 6) {
		uint16_t postoff = 0;

		if (intel_crtc->config->limited_color_range)
			postoff = (16 * (1 << 12) / 255) & 0x1fff;

		I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
		I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);

		I915_WRITE(PIPE_CSC_MODE(pipe), 0);
	} else {
		uint32_t mode = CSC_MODE_YUV_TO_RGB;

		if (intel_crtc->config->limited_color_range)
			mode |= CSC_BLACK_SCREEN_OFFSET;

		I915_WRITE(PIPE_CSC_MODE(pipe), mode);
	}
}

/*
 * Program PIPECONF (and, on BDW/SKL+, PIPEMISC) for a Haswell+ CPU
 * transcoder: dithering, interlace mode, gamma mode and pipe bpc.
 */
static void haswell_set_pipeconf(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum i915_pipe pipe = intel_crtc->pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	uint32_t val;

	val = 0;

	if (IS_HASWELL(dev) && intel_crtc->config->dither)
		val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
		val |= PIPECONF_INTERLACED_ILK;
	else
		val |= PIPECONF_PROGRESSIVE;

	I915_WRITE(PIPECONF(cpu_transcoder), val);
	POSTING_READ(PIPECONF(cpu_transcoder));

	I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
	POSTING_READ(GAMMA_MODE(intel_crtc->pipe));

	if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
		val = 0;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			val |= PIPEMISC_DITHER_6_BPC;
			break;
		case 24:
			val |= PIPEMISC_DITHER_8_BPC;
			break;
		case 30:
			val |= PIPEMISC_DITHER_10_BPC;
			break;
		case 36:
			val |= PIPEMISC_DITHER_12_BPC;
			break;
		default:
			/* Case prevented by pipe_config_set_bpp. */
			BUG();
		}

		if (intel_crtc->config->dither)
			val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;

		I915_WRITE(PIPEMISC(pipe), val);
	}
}

/*
 * Compute the DPLL divisors (and, for LVDS with downclocking available,
 * the reduced-clock divisors) for an Ironlake CRTC. Returns false when
 * no divisors hit the target port clock.
 */
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
				    struct intel_crtc_state *crtc_state,
				    intel_clock_t *clock,
				    bool *has_reduced_clock,
				    intel_clock_t *reduced_clock)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int refclk;
	const intel_limit_t *limit;
	bool ret, is_lvds = false;

	is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS);

	refclk = ironlake_get_refclk(crtc_state);

	/*
	 * Returns a set of divisors for the desired target clock with the given
	 * refclk, or FALSE. The returned values represent the clock equation:
	 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
	 */
	limit = intel_limit(crtc_state, refclk);
	ret = dev_priv->display.find_dpll(limit, crtc_state,
					  crtc_state->port_clock,
					  refclk, NULL, clock);
	if (!ret)
		return false;

	if (is_lvds && dev_priv->lvds_downclock_avail) {
		/*
		 * Ensure we match the reduced clock's P to the target clock.
		 * If the clocks don't match, we can't switch the display clock
		 * by using the FP0/FP1. In such case we will disable the LVDS
		 * downclock feature.
		 */
		*has_reduced_clock =
			dev_priv->display.find_dpll(limit, crtc_state,
						    dev_priv->lvds_downclock,
						    refclk, clock,
						    reduced_clock);
	}

	return true;
}

/* Minimum FDI lane count needed to carry target_clock at bpp over link_bw. */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	u32 bps = target_clock * bpp * 21 / 20;
	return DIV_ROUND_UP(bps, link_bw * 8);
}

/* True when the DPLL feedback M/N ratio is low enough to need CB tuning. */
static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
{
	return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
}

/*
 * Build the DPLL control register value for an Ironlake CRTC from the
 * already-computed divider state. Also ORs FP_CB_TUNE into *fp (and
 * *fp2, if provided) when CB tuning is required.
 */
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				      struct intel_crtc_state *crtc_state,
				      u32 *fp,
				      intel_clock_t *reduced_clock, u32 *fp2)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	uint32_t dpll;
	int factor, num_connectors = 0, i;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the encoders driven by this CRTC from the atomic state. */
	for (i = 0; i < state->num_connector; i++) {
		if (!state->connectors[i])
			continue;

		connector_state = state->connector_states[i];
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}

		num_connectors++;
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		*fp |= FP_CB_TUNE;

	if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
		*fp2 |= FP_CB_TUNE;

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	return dpll | DPLL_VCO_ENABLE;
}

/*
 * Compute the clock state for an Ironlake CRTC and, when a PCH encoder
 * is involved, reserve a shared DPLL programmed with it.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	intel_clock_t clock, reduced_clock;
	u32 dpll = 0, fp = 0, fp2 = 0;
	bool ok, has_reduced_clock = false;
	bool is_lvds = false;
	struct intel_shared_dpll *pll;

	is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);

	WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
	     "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));

	ok = ironlake_compute_clocks(&crtc->base, crtc_state, &clock,
				     &has_reduced_clock, &reduced_clock);
	if (!ok && !crtc_state->clock_set) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}
	/* Compat-code for transition, will disappear. */
	if (!crtc_state->clock_set) {
		crtc_state->dpll.n = clock.n;
		crtc_state->dpll.m1 = clock.m1;
		crtc_state->dpll.m2 = clock.m2;
		crtc_state->dpll.p1 = clock.p1;
		crtc_state->dpll.p2 = clock.p2;
	}

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (crtc_state->has_pch_encoder) {
		fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
		if (has_reduced_clock)
			fp2 = i9xx_dpll_compute_fp(&reduced_clock);

		dpll = ironlake_compute_dpll(crtc, crtc_state,
					     &fp, &reduced_clock,
					     has_reduced_clock ? &fp2 : NULL);

		crtc_state->dpll_hw_state.dpll = dpll;
		crtc_state->dpll_hw_state.fp0 = fp;
		if (has_reduced_clock)
			crtc_state->dpll_hw_state.fp1 = fp2;
		else
			crtc_state->dpll_hw_state.fp1 = fp;

		pll = intel_get_shared_dpll(crtc, crtc_state);
		if (pll == NULL) {
			DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
					 pipe_name(crtc->pipe));
			return -EINVAL;
		}
	}

	if (is_lvds && has_reduced_clock)
		crtc->lowfreq_avail = true;
	else
		crtc->lowfreq_avail = false;

	return 0;
}

/* Read back the link M/N values from the PCH transcoder registers. */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}

/* Read back the CPU transcoder link M/N (and optionally M2/N2) values. */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev =
		crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5 uses the per-pipe G4X register layout instead. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}

/* Read back the DP link M/N state, from the PCH or CPU transcoder. */
void intel_dp_get_m_n(struct intel_crtc *crtc,
		      struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder)
		intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
	else
		intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
					     &pipe_config->dp_m_n,
					     &pipe_config->dp_m2_n2);
}

/* Read back the FDI link M/N state from the CPU transcoder. */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}

/* Read back the Skylake pipe scaler (pfit) enable/position/size state. */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PS_CTL(crtc->pipe));

	if (tmp & PS_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
	}
}

/*
 * Read back the currently-programmed primary plane (Skylake register
 * layout) into plane_config: format, tiling, base, dimensions, stride.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE);

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* NOTE(review): fb points at intel_fb->base; this presumably frees
	 * the whole kzalloc'd intel_fb only because base looks like the
	 * first member — kfree(intel_fb) would be clearer. TODO confirm. */
	kfree(fb);
}

/* Read back the Ironlake panel fitter enable/position/size state. */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now.
		 */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}

/*
 * Read back the currently-programmed primary plane (Ironlake..Broadwell
 * register layout) into plane_config: format, tiling, base, dimensions,
 * stride.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height);

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}

/*
 * Read back the full hardware state of an Ironlake pipe into
 * pipe_config. Returns false when the pipe's power domain is off or
 * the pipe is disabled.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;

		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv->dev)) {
			/* IBX PCH PLLs are tied 1:1 to the pipe. */
			pipe_config->shared_dpll =
				(enum intel_dpll_id) crtc->pipe;
		} else {
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_B;
			else
				pipe_config->shared_dpll = DPLL_ID_PCH_PLL_A;
		}

		pll =
		      &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));

		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	return true;
}

/*
 * Sanity-check that the display engine is fully quiesced before the
 * LCPLL is disabled: no active CRTCs, power well off, SPLL/WRPLLs off,
 * panel power and PWMs off, utility pin and PCH GTC off, IRQs disabled.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL1) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL2) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}

/* D_COMP lives at a different MMIO location on HSW vs BDW. */
static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev))
		return I915_READ(D_COMP_HSW);
	else
		return I915_READ(D_COMP_BDW);
}

/* On HSW, D_COMP is written through the pcode mailbox under rps.hw_lock. */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}

/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
8309 */ 8310 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv, 8311 bool switch_to_fclk, bool allow_power_down) 8312 { 8313 uint32_t val; 8314 8315 assert_can_disable_lcpll(dev_priv); 8316 8317 val = I915_READ(LCPLL_CTL); 8318 8319 if (switch_to_fclk) { 8320 val |= LCPLL_CD_SOURCE_FCLK; 8321 I915_WRITE(LCPLL_CTL, val); 8322 8323 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) & 8324 LCPLL_CD_SOURCE_FCLK_DONE, 1)) 8325 DRM_ERROR("Switching to FCLK failed\n"); 8326 8327 val = I915_READ(LCPLL_CTL); 8328 } 8329 8330 val |= LCPLL_PLL_DISABLE; 8331 I915_WRITE(LCPLL_CTL, val); 8332 POSTING_READ(LCPLL_CTL); 8333 8334 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) 8335 DRM_ERROR("LCPLL still locked\n"); 8336 8337 val = hsw_read_dcomp(dev_priv); 8338 val |= D_COMP_COMP_DISABLE; 8339 hsw_write_dcomp(dev_priv, val); 8340 ndelay(100); 8341 8342 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0, 8343 1)) 8344 DRM_ERROR("D_COMP RCOMP still in progress\n"); 8345 8346 if (allow_power_down) { 8347 val = I915_READ(LCPLL_CTL); 8348 val |= LCPLL_POWER_DOWN_ALLOW; 8349 I915_WRITE(LCPLL_CTL, val); 8350 POSTING_READ(LCPLL_CTL); 8351 } 8352 } 8353 8354 /* 8355 * Fully restores LCPLL, disallowing power down and switching back to LCPLL 8356 * source. 8357 */ 8358 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 8359 { 8360 uint32_t val; 8361 8362 val = I915_READ(LCPLL_CTL); 8363 8364 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK | 8365 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK) 8366 return; 8367 8368 /* 8369 * Make sure we're not on PC8 state before disabling PC8, otherwise 8370 * we'll hang the machine. To prevent PC8 state, just enable force_wake. 
8371 */ 8372 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 8373 8374 if (val & LCPLL_POWER_DOWN_ALLOW) { 8375 val &= ~LCPLL_POWER_DOWN_ALLOW; 8376 I915_WRITE(LCPLL_CTL, val); 8377 POSTING_READ(LCPLL_CTL); 8378 } 8379 8380 val = hsw_read_dcomp(dev_priv); 8381 val |= D_COMP_COMP_FORCE; 8382 val &= ~D_COMP_COMP_DISABLE; 8383 hsw_write_dcomp(dev_priv, val); 8384 8385 val = I915_READ(LCPLL_CTL); 8386 val &= ~LCPLL_PLL_DISABLE; 8387 I915_WRITE(LCPLL_CTL, val); 8388 8389 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) 8390 DRM_ERROR("LCPLL not locked yet\n"); 8391 8392 if (val & LCPLL_CD_SOURCE_FCLK) { 8393 val = I915_READ(LCPLL_CTL); 8394 val &= ~LCPLL_CD_SOURCE_FCLK; 8395 I915_WRITE(LCPLL_CTL, val); 8396 8397 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) & 8398 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) 8399 DRM_ERROR("Switching back to LCPLL failed\n"); 8400 } 8401 8402 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 8403 } 8404 8405 /* 8406 * Package states C8 and deeper are really deep PC states that can only be 8407 * reached when all the devices on the system allow it, so even if the graphics 8408 * device allows PC8+, it doesn't mean the system will actually get to these 8409 * states. Our driver only allows PC8+ when going into runtime PM. 8410 * 8411 * The requirements for PC8+ are that all the outputs are disabled, the power 8412 * well is disabled and most interrupts are disabled, and these are also 8413 * requirements for runtime PM. When these conditions are met, we manually do 8414 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk 8415 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard 8416 * hang the machine. 8417 * 8418 * When we really reach PC8 or deeper states (not just when we allow it) we lose 8419 * the state of some registers, so when we come back from PC8+ we need to 8420 * restore this state. 
We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LP variants need the partition level clock gating enabled here. */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}

void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	/* Undo hsw_enable_pc8() in reverse order. */
	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	intel_prepare_ddi(dev);
}

/* Select a DDI PLL for the crtc; HSW+ has no programmable dividers here. */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	if (!intel_ddi_pll_select(crtc, crtc_state))
		return -EINVAL;

	crtc->lowfreq_avail = false;

	return 0;
}

/* Read out which DPLL the given port is using on Skylake. */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	u32 temp, dpll_ctl1;

	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		/*
		 * On SKL the eDP DPLL (DPLL0 as we don't use SSC) is not part
		 * of the shared DPLL framework and thus needs to be read out
		 * separately
		 */
		dpll_ctl1 = I915_READ(DPLL_CTRL1);
		pipe_config->dpll_hw_state.ctrl1 = dpll_ctl1 & 0x3f;
		break;
	case SKL_DPLL1:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
		break;
	}
}

/* Read out which WRPLL the given port is using on Haswell/Broadwell. */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		pipe_config->shared_dpll = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
		break;
	}
}

/* Read out the DDI port, its PLL and any PCH/FDI state for this transcoder. */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	if (pipe_config->shared_dpll >= 0) {
		pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];

		WARN_ON(!pll->get_hw_state(dev_priv, pll,
					   &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}

/*
 * Read the current hardware state of the pipe into pipe_config. Returns
 * false if the pipe (or a power domain it needs) is off.
 */
static bool haswell_get_pipe_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain pfit_domain;
	uint32_t tmp;

	if (!intel_display_power_is_enabled(dev_priv,
					    POWER_DOMAIN_PIPE(crtc->pipe)))
		return false;

	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/* If the eDP transcoder is enabled and feeds this pipe, use it
	 * instead of the per-pipe transcoder. */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum i915_pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through - treat unknown input as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	if (!intel_display_power_is_enabled(dev_priv,
			POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
		return false;

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
	if (!(tmp & PIPECONF_ENABLE))
		return false;

	haswell_get_ddi_port_state(crtc, pipe_config);

	intel_get_pipe_timings(crtc, pipe_config);

	pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
	if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
		if (IS_SKYLAKE(dev))
			skylake_get_pfit_config(crtc, pipe_config);
		else
			ironlake_get_pfit_config(crtc, pipe_config);
	}

	if (IS_HASWELL(dev))
		pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
			(I915_READ(IPS_CTL) & IPS_ENABLE);

	if (pipe_config->cpu_transcoder != TRANSCODER_EDP) {
		pipe_config->pixel_multiplier =
			I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	return true;
}

/* Program the 845/865-style cursor registers; base == 0 disables it. */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (base) {
		unsigned int width = intel_crtc->base.cursor->state->crtc_w;
		unsigned int height = intel_crtc->base.cursor->state->crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(_CURACNTR, 0);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(_CURABASE, base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(_CURACNTR, cntl);
		POSTING_READ(_CURACNTR);
		intel_crtc->cursor_cntl = cntl;
	}
}

/* Program the i9xx-style cursor registers; base == 0 disables it. */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl;

	cntl = 0;
	if (base) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only square power-of-two sizes are supported by the hw. */
		switch (intel_crtc->base.cursor->state->crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(intel_crtc->base.cursor->state->crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;
	}

	if (crtc->cursor->state->rotation == BIT(DRM_ROTATE_180))
		cntl |= CURSOR_ROTATE_180;

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}

/* If no part of the cursor is visible on the framebuffer, then the GPU may hang...
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     bool on)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	int x = crtc->cursor_x;
	int y = crtc->cursor_y;
	u32 base = 0, pos = 0;

	if (on)
		base = intel_crtc->cursor_addr;

	/* Disable the cursor (base = 0) when fully off-screen. */
	if (x >= intel_crtc->config->pipe_src_w)
		base = 0;

	if (y >= intel_crtc->config->pipe_src_h)
		base = 0;

	if (x < 0) {
		if (x + intel_crtc->base.cursor->state->crtc_w <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
		x = -x;
	}
	pos |= x << CURSOR_X_SHIFT;

	if (y < 0) {
		if (y + intel_crtc->base.cursor->state->crtc_h <= 0)
			base = 0;

		pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
		y = -y;
	}
	pos |= y << CURSOR_Y_SHIFT;

	if (base == 0 && intel_crtc->cursor_base == 0)
		return;

	I915_WRITE(CURPOS(pipe), pos);

	/* ILK+ do this automagically */
	if (HAS_GMCH_DISPLAY(dev) &&
	    crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) {
		base += (intel_crtc->base.cursor->state->crtc_h *
			 intel_crtc->base.cursor->state->crtc_w - 1) * 4;
	}

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base);
	else
		i9xx_update_cursor(crtc, base);
}

/* Check whether width x height is a cursor size the hardware supports. */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height equals width only when both are the same
		 * power of two, i.e. the cursor is square. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}

/* Update the crtc's software gamma LUT and push it to the hardware. */
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
				 u16 *blue, uint32_t start, uint32_t size)
{
	int end = (start + size > 256) ? 256 : start + size, i;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	for (i = start; i < end; i++) {
		intel_crtc->lut_r[i] = red[i] >> 8;
		intel_crtc->lut_g[i] = green[i] >> 8;
		intel_crtc->lut_b[i] = blue[i] >> 8;
	}

	intel_crtc_load_lut(crtc);
}

/* VESA 640x480x72Hz mode to set on the pipe */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};

/*
 * Wrap a GEM object in an intel_framebuffer. Takes ownership of the obj
 * reference: on failure it is released before returning an ERR_PTR.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(-ENOMEM);
	}

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;
err:
	drm_gem_object_unreference(&obj->base);
	kfree(intel_fb);

	return ERR_PTR(ret);
}

/* Locked wrapper around __intel_framebuffer_create(). */
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
			 struct drm_mode_fb_cmd2 *mode_cmd,
			 struct drm_i915_gem_object *obj)
{
	struct drm_framebuffer *fb;
	int ret;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);
	fb = __intel_framebuffer_create(dev, mode_cmd, obj);
	mutex_unlock(&dev->struct_mutex);

	return fb;
}

/* Bytes per scanline, rounded up to the 64-byte alignment the hw needs. */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}

/* Page-aligned buffer size needed to back the given mode at bpp. */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}

/* Allocate a GEM object and framebuffer sized for the given mode. */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_alloc_object(dev,
				    intel_framebuffer_size_for_mode(mode, bpp));
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	return intel_framebuffer_create(dev, &mode_cmd, obj);
}

/* Return the fbdev framebuffer if it is large enough for the mode, else NULL. */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_I915_FBDEV
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	return fb;
#else
	return NULL;
#endif
}

/*
 * Grab a crtc/encoder for load detection on the given connector. Returns
 * true (with *old filled in for intel_release_load_detect_pipe) when a pipe
 * has been lit up, false otherwise. Locks are acquired via ctx and retried
 * on -EDEADLK.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL;
	struct drm_connector_state *connector_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail_unlock;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (encoder->crtc) {
		crtc = encoder->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail_unlock;
		ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
		if (ret)
			goto fail_unlock;

		old->dpms_mode = connector->dpms;
		old->load_detect_temp = false;

		/* Make sure the crtc and connector are running */
		if (connector->dpms != DRM_MODE_DPMS_ON)
			connector->funcs->dpms(connector, DRM_MODE_DPMS_ON);

		return true;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;
		if (possible_crtc->state->enable)
			continue;
		/* This can occur when applying the pipe A quirk on resume. */
		if (to_intel_crtc(possible_crtc)->new_enabled)
			continue;

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail_unlock;
	}

	ret = drm_modeset_lock(&crtc->mutex, ctx);
	if (ret)
		goto fail_unlock;
	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail_unlock;
	intel_encoder->new_crtc = to_intel_crtc(crtc);
	to_intel_connector(connector)->new_encoder = intel_encoder;

	intel_crtc = to_intel_crtc(crtc);
	intel_crtc->new_enabled = true;
	intel_crtc->new_config = intel_crtc->config;
	old->dpms_mode = connector->dpms;
	old->load_detect_temp = true;
	old->release_fb = NULL;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return false;

	state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	connector_state->crtc = crtc;
	connector_state->best_encoder = &intel_encoder->base;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
		old->release_fb = fb;
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		if (old->release_fb)
			old->release_fb->funcs->destroy(old->release_fb);
		goto fail;
	}
	crtc->primary->crtc = crtc;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	intel_crtc->new_enabled = crtc->state->enable;
	if (intel_crtc->new_enabled)
		intel_crtc->new_config = intel_crtc->config;
	else
		intel_crtc->new_config = NULL;
fail_unlock:
	if (state) {
		drm_atomic_state_free(state);
		state = NULL;
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}

/* Undo intel_get_load_detect_pipe(): shut the pipe down and drop the temp fb. */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_device *dev = connector->dev;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = encoder->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_atomic_state *state;
	struct drm_connector_state *connector_state;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (old->load_detect_temp) {
		state = drm_atomic_state_alloc(dev);
		if (!state)
			goto fail;

		state->acquire_ctx = ctx;

		connector_state = drm_atomic_get_connector_state(state, connector);
		if (IS_ERR(connector_state))
			goto fail;

		to_intel_connector(connector)->new_encoder = NULL;
		intel_encoder->new_crtc = NULL;
		intel_crtc->new_enabled = false;
		intel_crtc->new_config = NULL;

		connector_state->best_encoder = NULL;
		connector_state->crtc = NULL;

		intel_set_mode(crtc, NULL, 0, 0, NULL, state);

		drm_atomic_state_free(state);

		if (old->release_fb) {
			drm_framebuffer_unregister_private(old->release_fb);
			drm_framebuffer_unreference(old->release_fb);
		}

		return;
	}

	/* Switch crtc and encoder back off if necessary */
	if (old->dpms_mode != DRM_MODE_DPMS_ON)
		connector->funcs->dpms(connector, old->dpms_mode);

	return;
fail:
	DRM_DEBUG_KMS("Couldn't release load detect pipe.\n");
	drm_atomic_state_free(state);
}

/* Reference clock, in kHz, feeding the DPLL for this pipe configuration. */
static int i9xx_pll_refclk(struct drm_device *dev,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll = pipe_config->dpll_hw_state.dpll;

	if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
		return dev_priv->vbt.lvds_ssc_freq;
	else if (HAS_PCH_SPLIT(dev))
		return 120000;
	else if (!IS_GEN2(dev))
		return 96000;
	else
		return 48000;
}

/* Returns the clock of the currently programmed mode of the given pipe.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	intel_clock_t clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* Pick the FP register actually selected by the DPLL. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			pineview_clock(refclk, &clock);
		else
			i9xx_clock(refclk, &clock);
	} else {
		/* Gen2: I830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		i9xx_clock(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = clock.dot;
}

int intel_dotclock_calculate(int link_freq,
			     const struct intel_link_m_n *m_n)
{
	/*
	 * The calculation for the data clock is:
	 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
	 * But we want to avoid losing precision if possible, so:
	 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
	 *
	 * and the link clock is simpler:
	 * link_clock = (m * link_clock) / n
	 */

	/* Guard against division by zero on unprogrammed hardware. */
	if (!m_n->link_n)
		return 0;

	return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
}

static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * This value does not include pixel_multiplier.
	 * We will check that port_clock and adjusted_mode.crtc_clock
	 * agree once we know their relationship in the encoder's
	 * get_config() function.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev) * 10000,
					 &pipe_config->fdi_m_n);
}

/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum i915_pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config.cpu_transcoder = (enum transcoder) pipe;
	pipe_config.pixel_multiplier = 1;
	pipe_config.dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config.dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config.dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, &pipe_config);

	/* Hardware timing registers store (value - 1). */
	mode->clock = pipe_config.port_clock / pipe_config.pixel_multiplier;
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	return mode;
}

/* Drop LVDS to the downclocked frequency when the display is idle. */
static void intel_decrease_pllclock(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	if (!HAS_GMCH_DISPLAY(dev))
		return;

	if (!dev_priv->lvds_downclock_avail)
		return;

	/*
	 * Since this is called by a timer, we should never get here in
	 * the manual case.
	 */
	if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
		int pipe = intel_crtc->pipe;
		int dpll_reg = DPLL(pipe);
		int dpll;

		DRM_DEBUG_DRIVER("downclocking LVDS\n");

		assert_panel_unlocked(dev_priv, pipe);

		/* Select FPA1 (the downclocked divisors) and verify it took
		 * effect after the next vblank. */
		dpll = I915_READ(dpll_reg);
		dpll |= DISPLAY_RATE_SELECT_FPA1;
		I915_WRITE(dpll_reg, dpll);
		intel_wait_for_vblank(dev, pipe);
		dpll = I915_READ(dpll_reg);
		if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
			DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
	}

}

/* Note the GPU is busy: take a runtime PM reference and raise RPS. */
void intel_mark_busy(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->mm.busy)
		return;

	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}

/* Note the GPU went idle: downclock displays, drop RPS and the PM reference. */
void intel_mark_idle(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	for_each_crtc(dev, crtc) {
		if (!crtc->primary->fb)
			continue;

		intel_decrease_pllclock(crtc);
	}

	if (INTEL_INFO(dev)->gen >= 6)
		gen6_rps_idle(dev->dev_private);

	intel_runtime_pm_put(dev_priv);
}

/* Replace the crtc's config, freeing the old one. crtc_state may be NULL. */
static void intel_crtc_set_state(struct intel_crtc *crtc,
				 struct intel_crtc_state *crtc_state)
{
	kfree(crtc->config);
	crtc->config = crtc_state;
	crtc->base.state = &crtc_state->base;
}

static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* Detach any pending flip work under the event lock, then cancel it
	 * outside the lock. */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	lockmgr(&dev->event_lock, LK_RELEASE);

	if (work) {
		cancel_work_sync(&work->work);
		kfree(work);
	}

	intel_crtc_set_state(intel_crtc, NULL);
	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}

/* Deferred completion of a page flip: unpin the old fb and drop references. */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct drm_device *dev = work->crtc->dev;
	enum i915_pipe pipe = to_intel_crtc(work->crtc)->pipe;

	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	intel_fbc_update(dev);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
	drm_framebuffer_unreference(work->old_fb);

	BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
	atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);

	kfree(work);
}

static void do_intel_finish_page_flip(struct drm_device *dev,
				      struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	/* Ensure we don't miss a work->pending update ...
*/ 9532 smp_rmb(); 9533 9534 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 9535 lockmgr(&dev->event_lock, LK_RELEASE); 9536 return; 9537 } 9538 9539 page_flip_completed(intel_crtc); 9540 9541 lockmgr(&dev->event_lock, LK_RELEASE); 9542 } 9543 9544 void intel_finish_page_flip(struct drm_device *dev, int pipe) 9545 { 9546 struct drm_i915_private *dev_priv = dev->dev_private; 9547 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9548 9549 do_intel_finish_page_flip(dev, crtc); 9550 } 9551 9552 void intel_finish_page_flip_plane(struct drm_device *dev, int plane) 9553 { 9554 struct drm_i915_private *dev_priv = dev->dev_private; 9555 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; 9556 9557 do_intel_finish_page_flip(dev, crtc); 9558 } 9559 9560 /* Is 'a' after or equal to 'b'? */ 9561 static bool g4x_flip_count_after_eq(u32 a, u32 b) 9562 { 9563 return !((a - b) & 0x80000000); 9564 } 9565 9566 static bool page_flip_finished(struct intel_crtc *crtc) 9567 { 9568 struct drm_device *dev = crtc->base.dev; 9569 struct drm_i915_private *dev_priv = dev->dev_private; 9570 9571 if (i915_reset_in_progress(&dev_priv->gpu_error) || 9572 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 9573 return true; 9574 9575 /* 9576 * The relevant registers doen't exist on pre-ctg. 9577 * As the flip done interrupt doesn't trigger for mmio 9578 * flips on gmch platforms, a flip count check isn't 9579 * really needed there. But since ctg has the registers, 9580 * include it in the check anyway. 9581 */ 9582 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev)) 9583 return true; 9584 9585 /* 9586 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips 9587 * used the same base address. In that case the mmio flip might 9588 * have completed, but the CS hasn't even executed the flip yet. 
9589 * 9590 * A flip count check isn't enough as the CS might have updated 9591 * the base address just after start of vblank, but before we 9592 * managed to process the interrupt. This means we'd complete the 9593 * CS flip too soon. 9594 * 9595 * Combining both checks should get us a good enough result. It may 9596 * still happen that the CS flip has been executed, but has not 9597 * yet actually completed. But in case the base address is the same 9598 * anyway, we don't really care. 9599 */ 9600 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 9601 crtc->unpin_work->gtt_offset && 9602 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_GM45(crtc->pipe)), 9603 crtc->unpin_work->flip_count); 9604 } 9605 9606 void intel_prepare_page_flip(struct drm_device *dev, int plane) 9607 { 9608 struct drm_i915_private *dev_priv = dev->dev_private; 9609 struct intel_crtc *intel_crtc = 9610 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 9611 9612 9613 /* 9614 * This is called both by irq handlers and the reset code (to complete 9615 * lost pageflips) so needs the full irqsave spinlocks. 9616 * 9617 * NB: An MMIO update of the plane base pointer will also 9618 * generate a page-flip completion irq, i.e. every modeset 9619 * is also accompanied by a spurious intel_prepare_page_flip(). 9620 */ 9621 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 9622 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 9623 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 9624 lockmgr(&dev->event_lock, LK_RELEASE); 9625 } 9626 9627 static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc) 9628 { 9629 /* Ensure that the work item is consistent when activating it ... */ 9630 smp_wmb(); 9631 atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING); 9632 /* and that it is marked active as soon as the irq could fire. 
*/ 9633 smp_wmb(); 9634 } 9635 9636 static int intel_gen2_queue_flip(struct drm_device *dev, 9637 struct drm_crtc *crtc, 9638 struct drm_framebuffer *fb, 9639 struct drm_i915_gem_object *obj, 9640 struct intel_engine_cs *ring, 9641 uint32_t flags) 9642 { 9643 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9644 u32 flip_mask; 9645 int ret; 9646 9647 ret = intel_ring_begin(ring, 6); 9648 if (ret) 9649 return ret; 9650 9651 /* Can't queue multiple flips, so wait for the previous 9652 * one to finish before executing the next. 9653 */ 9654 if (intel_crtc->plane) 9655 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 9656 else 9657 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 9658 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 9659 intel_ring_emit(ring, MI_NOOP); 9660 intel_ring_emit(ring, MI_DISPLAY_FLIP | 9661 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9662 intel_ring_emit(ring, fb->pitches[0]); 9663 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 9664 intel_ring_emit(ring, 0); /* aux display base address, unused */ 9665 9666 intel_mark_page_flip_active(intel_crtc); 9667 __intel_ring_advance(ring); 9668 return 0; 9669 } 9670 9671 static int intel_gen3_queue_flip(struct drm_device *dev, 9672 struct drm_crtc *crtc, 9673 struct drm_framebuffer *fb, 9674 struct drm_i915_gem_object *obj, 9675 struct intel_engine_cs *ring, 9676 uint32_t flags) 9677 { 9678 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9679 u32 flip_mask; 9680 int ret; 9681 9682 ret = intel_ring_begin(ring, 6); 9683 if (ret) 9684 return ret; 9685 9686 if (intel_crtc->plane) 9687 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 9688 else 9689 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; 9690 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); 9691 intel_ring_emit(ring, MI_NOOP); 9692 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | 9693 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9694 intel_ring_emit(ring, fb->pitches[0]); 9695 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 9696 intel_ring_emit(ring, 
MI_NOOP); 9697 9698 intel_mark_page_flip_active(intel_crtc); 9699 __intel_ring_advance(ring); 9700 return 0; 9701 } 9702 9703 static int intel_gen4_queue_flip(struct drm_device *dev, 9704 struct drm_crtc *crtc, 9705 struct drm_framebuffer *fb, 9706 struct drm_i915_gem_object *obj, 9707 struct intel_engine_cs *ring, 9708 uint32_t flags) 9709 { 9710 struct drm_i915_private *dev_priv = dev->dev_private; 9711 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9712 uint32_t pf, pipesrc; 9713 int ret; 9714 9715 ret = intel_ring_begin(ring, 4); 9716 if (ret) 9717 return ret; 9718 9719 /* i965+ uses the linear or tiled offsets from the 9720 * Display Registers (which do not change across a page-flip) 9721 * so we need only reprogram the base address. 9722 */ 9723 intel_ring_emit(ring, MI_DISPLAY_FLIP | 9724 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9725 intel_ring_emit(ring, fb->pitches[0]); 9726 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset | 9727 obj->tiling_mode); 9728 9729 /* XXX Enabling the panel-fitter across page-flip is so far 9730 * untested on non-native modes, so ignore it for now. 9731 * pf = I915_READ(pipe == 0 ? 
PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; 9732 */ 9733 pf = 0; 9734 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 9735 intel_ring_emit(ring, pf | pipesrc); 9736 9737 intel_mark_page_flip_active(intel_crtc); 9738 __intel_ring_advance(ring); 9739 return 0; 9740 } 9741 9742 static int intel_gen6_queue_flip(struct drm_device *dev, 9743 struct drm_crtc *crtc, 9744 struct drm_framebuffer *fb, 9745 struct drm_i915_gem_object *obj, 9746 struct intel_engine_cs *ring, 9747 uint32_t flags) 9748 { 9749 struct drm_i915_private *dev_priv = dev->dev_private; 9750 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9751 uint32_t pf, pipesrc; 9752 int ret; 9753 9754 ret = intel_ring_begin(ring, 4); 9755 if (ret) 9756 return ret; 9757 9758 intel_ring_emit(ring, MI_DISPLAY_FLIP | 9759 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 9760 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode); 9761 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 9762 9763 /* Contrary to the suggestions in the documentation, 9764 * "Enable Panel Fitter" does not seem to be required when page 9765 * flipping with a non-native mode, and worse causes a normal 9766 * modeset to fail. 
9767 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; 9768 */ 9769 pf = 0; 9770 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 9771 intel_ring_emit(ring, pf | pipesrc); 9772 9773 intel_mark_page_flip_active(intel_crtc); 9774 __intel_ring_advance(ring); 9775 return 0; 9776 } 9777 9778 static int intel_gen7_queue_flip(struct drm_device *dev, 9779 struct drm_crtc *crtc, 9780 struct drm_framebuffer *fb, 9781 struct drm_i915_gem_object *obj, 9782 struct intel_engine_cs *ring, 9783 uint32_t flags) 9784 { 9785 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9786 uint32_t plane_bit = 0; 9787 int len, ret; 9788 9789 switch (intel_crtc->plane) { 9790 case PLANE_A: 9791 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A; 9792 break; 9793 case PLANE_B: 9794 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B; 9795 break; 9796 case PLANE_C: 9797 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C; 9798 break; 9799 default: 9800 WARN_ONCE(1, "unknown plane in flip command\n"); 9801 return -ENODEV; 9802 } 9803 9804 len = 4; 9805 if (ring->id == RCS) { 9806 len += 6; 9807 /* 9808 * On Gen 8, SRM is now taking an extra dword to accommodate 9809 * 48bits addresses, and we need a NOOP for the batch size to 9810 * stay even. 9811 */ 9812 if (IS_GEN8(dev)) 9813 len += 2; 9814 } 9815 9816 /* 9817 * BSpec MI_DISPLAY_FLIP for IVB: 9818 * "The full packet must be contained within the same cache line." 9819 * 9820 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same 9821 * cacheline, if we ever start emitting more commands before 9822 * the MI_DISPLAY_FLIP we may need to first emit everything else, 9823 * then do the cacheline alignment, and finally emit the 9824 * MI_DISPLAY_FLIP. 9825 */ 9826 ret = intel_ring_cacheline_align(ring); 9827 if (ret) 9828 return ret; 9829 9830 ret = intel_ring_begin(ring, len); 9831 if (ret) 9832 return ret; 9833 9834 /* Unmask the flip-done completion message. 
Note that the bspec says that 9835 * we should do this for both the BCS and RCS, and that we must not unmask 9836 * more than one flip event at any time (or ensure that one flip message 9837 * can be sent by waiting for flip-done prior to queueing new flips). 9838 * Experimentation says that BCS works despite DERRMR masking all 9839 * flip-done completion events and that unmasking all planes at once 9840 * for the RCS also doesn't appear to drop events. Setting the DERRMR 9841 * to zero does lead to lockups within MI_DISPLAY_FLIP. 9842 */ 9843 if (ring->id == RCS) { 9844 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 9845 intel_ring_emit(ring, DERRMR); 9846 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 9847 DERRMR_PIPEB_PRI_FLIP_DONE | 9848 DERRMR_PIPEC_PRI_FLIP_DONE)); 9849 if (IS_GEN8(dev)) 9850 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) | 9851 MI_SRM_LRM_GLOBAL_GTT); 9852 else 9853 intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | 9854 MI_SRM_LRM_GLOBAL_GTT); 9855 intel_ring_emit(ring, DERRMR); 9856 intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 9857 if (IS_GEN8(dev)) { 9858 intel_ring_emit(ring, 0); 9859 intel_ring_emit(ring, MI_NOOP); 9860 } 9861 } 9862 9863 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 9864 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 9865 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset); 9866 intel_ring_emit(ring, (MI_NOOP)); 9867 9868 intel_mark_page_flip_active(intel_crtc); 9869 __intel_ring_advance(ring); 9870 return 0; 9871 } 9872 9873 static bool use_mmio_flip(struct intel_engine_cs *ring, 9874 struct drm_i915_gem_object *obj) 9875 { 9876 /* 9877 * This is not being used for older platforms, because 9878 * non-availability of flip done interrupt forces us to use 9879 * CS flips. Older platforms derive flip done using some clever 9880 * tricks involving the flip_pending status bits and vblank irqs. 9881 * So using MMIO flips there would disrupt this mechanism. 
9882 */ 9883 9884 if (ring == NULL) 9885 return true; 9886 9887 if (INTEL_INFO(ring->dev)->gen < 5) 9888 return false; 9889 9890 if (i915.use_mmio_flip < 0) 9891 return false; 9892 else if (i915.use_mmio_flip > 0) 9893 return true; 9894 else if (i915.enable_execlists) 9895 return true; 9896 else 9897 return ring != i915_gem_request_get_ring(obj->last_read_req); 9898 } 9899 9900 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc) 9901 { 9902 struct drm_device *dev = intel_crtc->base.dev; 9903 struct drm_i915_private *dev_priv = dev->dev_private; 9904 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 9905 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 9906 struct drm_i915_gem_object *obj = intel_fb->obj; 9907 const enum i915_pipe pipe = intel_crtc->pipe; 9908 u32 ctl, stride; 9909 9910 ctl = I915_READ(PLANE_CTL(pipe, 0)); 9911 ctl &= ~PLANE_CTL_TILED_MASK; 9912 if (obj->tiling_mode == I915_TILING_X) 9913 ctl |= PLANE_CTL_TILED_X; 9914 9915 /* 9916 * The stride is either expressed as a multiple of 64 bytes chunks for 9917 * linear buffers or in number of tiles for tiled buffers. 9918 */ 9919 stride = fb->pitches[0] >> 6; 9920 if (obj->tiling_mode == I915_TILING_X) 9921 stride = fb->pitches[0] >> 9; /* X tiles are 512 bytes wide */ 9922 9923 /* 9924 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on 9925 * PLANE_SURF updates, the update is then guaranteed to be atomic. 
9926 */ 9927 I915_WRITE(PLANE_CTL(pipe, 0), ctl); 9928 I915_WRITE(PLANE_STRIDE(pipe, 0), stride); 9929 9930 I915_WRITE(PLANE_SURF(pipe, 0), intel_crtc->unpin_work->gtt_offset); 9931 POSTING_READ(PLANE_SURF(pipe, 0)); 9932 } 9933 9934 static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc) 9935 { 9936 struct drm_device *dev = intel_crtc->base.dev; 9937 struct drm_i915_private *dev_priv = dev->dev_private; 9938 struct intel_framebuffer *intel_fb = 9939 to_intel_framebuffer(intel_crtc->base.primary->fb); 9940 struct drm_i915_gem_object *obj = intel_fb->obj; 9941 u32 dspcntr; 9942 u32 reg; 9943 9944 reg = DSPCNTR(intel_crtc->plane); 9945 dspcntr = I915_READ(reg); 9946 9947 if (obj->tiling_mode != I915_TILING_NONE) 9948 dspcntr |= DISPPLANE_TILED; 9949 else 9950 dspcntr &= ~DISPPLANE_TILED; 9951 9952 I915_WRITE(reg, dspcntr); 9953 9954 I915_WRITE(DSPSURF(intel_crtc->plane), 9955 intel_crtc->unpin_work->gtt_offset); 9956 POSTING_READ(DSPSURF(intel_crtc->plane)); 9957 9958 } 9959 9960 /* 9961 * XXX: This is the temporary way to update the plane registers until we get 9962 * around to using the usual plane update functions for MMIO flips 9963 */ 9964 static void intel_do_mmio_flip(struct intel_crtc *intel_crtc) 9965 { 9966 struct drm_device *dev = intel_crtc->base.dev; 9967 bool atomic_update; 9968 u32 start_vbl_count; 9969 9970 intel_mark_page_flip_active(intel_crtc); 9971 9972 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count); 9973 9974 if (INTEL_INFO(dev)->gen >= 9) 9975 skl_do_mmio_flip(intel_crtc); 9976 else 9977 /* use_mmio_flip() retricts MMIO flips to ilk+ */ 9978 ilk_do_mmio_flip(intel_crtc); 9979 9980 if (atomic_update) 9981 intel_pipe_update_end(intel_crtc, start_vbl_count); 9982 } 9983 9984 static void intel_mmio_flip_work_func(struct work_struct *work) 9985 { 9986 struct intel_crtc *crtc = 9987 container_of(work, struct intel_crtc, mmio_flip.work); 9988 struct intel_mmio_flip *mmio_flip; 9989 9990 mmio_flip = &crtc->mmio_flip; 9991 if 
(mmio_flip->req) 9992 WARN_ON(__i915_wait_request(mmio_flip->req, 9993 crtc->reset_counter, 9994 false, NULL, NULL) != 0); 9995 9996 intel_do_mmio_flip(crtc); 9997 if (mmio_flip->req) { 9998 mutex_lock(&crtc->base.dev->struct_mutex); 9999 i915_gem_request_assign(&mmio_flip->req, NULL); 10000 mutex_unlock(&crtc->base.dev->struct_mutex); 10001 } 10002 } 10003 10004 static int intel_queue_mmio_flip(struct drm_device *dev, 10005 struct drm_crtc *crtc, 10006 struct drm_framebuffer *fb, 10007 struct drm_i915_gem_object *obj, 10008 struct intel_engine_cs *ring, 10009 uint32_t flags) 10010 { 10011 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10012 10013 i915_gem_request_assign(&intel_crtc->mmio_flip.req, 10014 obj->last_write_req); 10015 10016 schedule_work(&intel_crtc->mmio_flip.work); 10017 10018 return 0; 10019 } 10020 10021 static int intel_default_queue_flip(struct drm_device *dev, 10022 struct drm_crtc *crtc, 10023 struct drm_framebuffer *fb, 10024 struct drm_i915_gem_object *obj, 10025 struct intel_engine_cs *ring, 10026 uint32_t flags) 10027 { 10028 return -ENODEV; 10029 } 10030 10031 static bool __intel_pageflip_stall_check(struct drm_device *dev, 10032 struct drm_crtc *crtc) 10033 { 10034 struct drm_i915_private *dev_priv = dev->dev_private; 10035 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10036 struct intel_unpin_work *work = intel_crtc->unpin_work; 10037 u32 addr; 10038 10039 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) 10040 return true; 10041 10042 if (!work->enable_stall_check) 10043 return false; 10044 10045 if (work->flip_ready_vblank == 0) { 10046 if (work->flip_queued_req && 10047 !i915_gem_request_completed(work->flip_queued_req, true)) 10048 return false; 10049 10050 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 10051 } 10052 10053 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 10054 return false; 10055 10056 /* Potential stall - if we see that the flip has happened, 10057 * assume a missed 
interrupt. */ 10058 if (INTEL_INFO(dev)->gen >= 4) 10059 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 10060 else 10061 addr = I915_READ(DSPADDR(intel_crtc->plane)); 10062 10063 /* There is a potential issue here with a false positive after a flip 10064 * to the same address. We could address this by checking for a 10065 * non-incrementing frame counter. 10066 */ 10067 return addr == work->gtt_offset; 10068 } 10069 10070 void intel_check_page_flip(struct drm_device *dev, int pipe) 10071 { 10072 struct drm_i915_private *dev_priv = dev->dev_private; 10073 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 10074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10075 10076 if (crtc == NULL) 10077 return; 10078 10079 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10080 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { 10081 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 10082 intel_crtc->unpin_work->flip_queued_vblank, 10083 drm_vblank_count(dev, pipe)); 10084 page_flip_completed(intel_crtc); 10085 } 10086 lockmgr(&dev->event_lock, LK_RELEASE); 10087 } 10088 10089 static int intel_crtc_page_flip(struct drm_crtc *crtc, 10090 struct drm_framebuffer *fb, 10091 struct drm_pending_vblank_event *event, 10092 uint32_t page_flip_flags) 10093 { 10094 struct drm_device *dev = crtc->dev; 10095 struct drm_i915_private *dev_priv = dev->dev_private; 10096 struct drm_framebuffer *old_fb = crtc->primary->fb; 10097 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 10098 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10099 struct drm_plane *primary = crtc->primary; 10100 enum i915_pipe pipe = intel_crtc->pipe; 10101 struct intel_unpin_work *work; 10102 struct intel_engine_cs *ring; 10103 int ret; 10104 10105 /* 10106 * drm_mode_page_flip_ioctl() should already catch this, but double 10107 * check to be safe. In the future we may enable pageflipping from 10108 * a disabled primary plane. 
10109 */ 10110 if (WARN_ON(intel_fb_obj(old_fb) == NULL)) 10111 return -EBUSY; 10112 10113 /* Can't change pixel format via MI display flips. */ 10114 if (fb->pixel_format != crtc->primary->fb->pixel_format) 10115 return -EINVAL; 10116 10117 /* 10118 * TILEOFF/LINOFF registers can't be changed via MI display flips. 10119 * Note that pitch changes could also affect these register. 10120 */ 10121 if (INTEL_INFO(dev)->gen > 3 && 10122 (fb->offsets[0] != crtc->primary->fb->offsets[0] || 10123 fb->pitches[0] != crtc->primary->fb->pitches[0])) 10124 return -EINVAL; 10125 10126 if (i915_terminally_wedged(&dev_priv->gpu_error)) 10127 goto out_hang; 10128 10129 work = kzalloc(sizeof(*work), GFP_KERNEL); 10130 if (work == NULL) 10131 return -ENOMEM; 10132 10133 work->event = event; 10134 work->crtc = crtc; 10135 work->old_fb = old_fb; 10136 INIT_WORK(&work->work, intel_unpin_work_fn); 10137 10138 ret = drm_crtc_vblank_get(crtc); 10139 if (ret) 10140 goto free_work; 10141 10142 /* We borrow the event spin lock for protecting unpin_work */ 10143 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10144 if (intel_crtc->unpin_work) { 10145 /* Before declaring the flip queue wedged, check if 10146 * the hardware completed the operation behind our backs. 10147 */ 10148 if (__intel_pageflip_stall_check(dev, crtc)) { 10149 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 10150 page_flip_completed(intel_crtc); 10151 } else { 10152 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 10153 lockmgr(&dev->event_lock, LK_RELEASE); 10154 10155 drm_crtc_vblank_put(crtc); 10156 kfree(work); 10157 return -EBUSY; 10158 } 10159 } 10160 intel_crtc->unpin_work = work; 10161 lockmgr(&dev->event_lock, LK_RELEASE); 10162 10163 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 10164 flush_workqueue(dev_priv->wq); 10165 10166 /* Reference the objects for the scheduled work. 
*/ 10167 drm_framebuffer_reference(work->old_fb); 10168 drm_gem_object_reference(&obj->base); 10169 10170 crtc->primary->fb = fb; 10171 update_state_fb(crtc->primary); 10172 10173 /* Keep state structure in sync */ 10174 if (crtc->primary->state->fb) 10175 drm_framebuffer_unreference(crtc->primary->state->fb); 10176 crtc->primary->state->fb = fb; 10177 if (crtc->primary->state->fb) 10178 drm_framebuffer_reference(crtc->primary->state->fb); 10179 10180 work->pending_flip_obj = obj; 10181 10182 ret = i915_mutex_lock_interruptible(dev); 10183 if (ret) 10184 goto cleanup; 10185 10186 atomic_inc(&intel_crtc->unpin_work_count); 10187 intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); 10188 10189 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 10190 work->flip_count = I915_READ(PIPE_FLIPCOUNT_GM45(pipe)) + 1; 10191 10192 if (IS_VALLEYVIEW(dev)) { 10193 ring = &dev_priv->ring[BCS]; 10194 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode) 10195 /* vlv: DISPLAY_FLIP fails to change tiling */ 10196 ring = NULL; 10197 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { 10198 ring = &dev_priv->ring[BCS]; 10199 } else if (INTEL_INFO(dev)->gen >= 7) { 10200 ring = i915_gem_request_get_ring(obj->last_read_req); 10201 if (ring == NULL || ring->id != RCS) 10202 ring = &dev_priv->ring[BCS]; 10203 } else { 10204 ring = &dev_priv->ring[RCS]; 10205 } 10206 10207 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, 10208 crtc->primary->state, ring); 10209 if (ret) 10210 goto cleanup_pending; 10211 10212 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) 10213 + intel_crtc->dspaddr_offset; 10214 10215 if (use_mmio_flip(ring, obj)) { 10216 ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, 10217 page_flip_flags); 10218 if (ret) 10219 goto cleanup_unpin; 10220 10221 i915_gem_request_assign(&work->flip_queued_req, 10222 obj->last_write_req); 10223 } else { 10224 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, ring, 10225 
page_flip_flags); 10226 if (ret) 10227 goto cleanup_unpin; 10228 10229 i915_gem_request_assign(&work->flip_queued_req, 10230 intel_ring_get_request(ring)); 10231 } 10232 10233 work->flip_queued_vblank = drm_crtc_vblank_count(crtc); 10234 work->enable_stall_check = true; 10235 10236 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj, 10237 INTEL_FRONTBUFFER_PRIMARY(pipe)); 10238 10239 intel_fbc_disable(dev); 10240 intel_frontbuffer_flip_prepare(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); 10241 mutex_unlock(&dev->struct_mutex); 10242 10243 trace_i915_flip_request(intel_crtc->plane, obj); 10244 10245 return 0; 10246 10247 cleanup_unpin: 10248 intel_unpin_fb_obj(fb, crtc->primary->state); 10249 cleanup_pending: 10250 atomic_dec(&intel_crtc->unpin_work_count); 10251 mutex_unlock(&dev->struct_mutex); 10252 cleanup: 10253 crtc->primary->fb = old_fb; 10254 update_state_fb(crtc->primary); 10255 10256 drm_gem_object_unreference_unlocked(&obj->base); 10257 drm_framebuffer_unreference(work->old_fb); 10258 10259 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10260 intel_crtc->unpin_work = NULL; 10261 lockmgr(&dev->event_lock, LK_RELEASE); 10262 10263 drm_crtc_vblank_put(crtc); 10264 free_work: 10265 kfree(work); 10266 10267 if (ret == -EIO) { 10268 out_hang: 10269 ret = intel_plane_restore(primary); 10270 if (ret == 0 && event) { 10271 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 10272 drm_send_vblank_event(dev, pipe, event); 10273 lockmgr(&dev->event_lock, LK_RELEASE); 10274 } 10275 } 10276 return ret; 10277 } 10278 10279 static struct drm_crtc_helper_funcs intel_helper_funcs = { 10280 .mode_set_base_atomic = intel_pipe_set_base_atomic, 10281 .load_lut = intel_crtc_load_lut, 10282 .atomic_begin = intel_begin_crtc_commit, 10283 .atomic_flush = intel_finish_crtc_commit, 10284 }; 10285 10286 /** 10287 * intel_modeset_update_staged_output_state 10288 * 10289 * Updates the staged output configuration state, e.g. after we've read out the 10290 * current hw state. 
10291 */ 10292 static void intel_modeset_update_staged_output_state(struct drm_device *dev) 10293 { 10294 struct intel_crtc *crtc; 10295 struct intel_encoder *encoder; 10296 struct intel_connector *connector; 10297 10298 for_each_intel_connector(dev, connector) { 10299 connector->new_encoder = 10300 to_intel_encoder(connector->base.encoder); 10301 } 10302 10303 for_each_intel_encoder(dev, encoder) { 10304 encoder->new_crtc = 10305 to_intel_crtc(encoder->base.crtc); 10306 } 10307 10308 for_each_intel_crtc(dev, crtc) { 10309 crtc->new_enabled = crtc->base.state->enable; 10310 10311 if (crtc->new_enabled) 10312 crtc->new_config = crtc->config; 10313 else 10314 crtc->new_config = NULL; 10315 } 10316 } 10317 10318 /* Transitional helper to copy current connector/encoder state to 10319 * connector->state. This is needed so that code that is partially 10320 * converted to atomic does the right thing. 10321 */ 10322 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) 10323 { 10324 struct intel_connector *connector; 10325 10326 for_each_intel_connector(dev, connector) { 10327 if (connector->base.encoder) { 10328 connector->base.state->best_encoder = 10329 connector->base.encoder; 10330 connector->base.state->crtc = 10331 connector->base.encoder->crtc; 10332 } else { 10333 connector->base.state->best_encoder = NULL; 10334 connector->base.state->crtc = NULL; 10335 } 10336 } 10337 } 10338 10339 /** 10340 * intel_modeset_commit_output_state 10341 * 10342 * This function copies the stage display pipe configuration to the real one. 
10343 */ 10344 static void intel_modeset_commit_output_state(struct drm_device *dev) 10345 { 10346 struct intel_crtc *crtc; 10347 struct intel_encoder *encoder; 10348 struct intel_connector *connector; 10349 10350 for_each_intel_connector(dev, connector) { 10351 connector->base.encoder = &connector->new_encoder->base; 10352 } 10353 10354 for_each_intel_encoder(dev, encoder) { 10355 encoder->base.crtc = &encoder->new_crtc->base; 10356 } 10357 10358 for_each_intel_crtc(dev, crtc) { 10359 crtc->base.state->enable = crtc->new_enabled; 10360 crtc->base.enabled = crtc->new_enabled; 10361 } 10362 10363 intel_modeset_update_connector_atomic_state(dev); 10364 } 10365 10366 static void 10367 connected_sink_compute_bpp(struct intel_connector *connector, 10368 struct intel_crtc_state *pipe_config) 10369 { 10370 int bpp = pipe_config->pipe_bpp; 10371 10372 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n", 10373 connector->base.base.id, 10374 connector->base.name); 10375 10376 /* Don't use an invalid EDID bpc value */ 10377 if (connector->base.display_info.bpc && 10378 connector->base.display_info.bpc * 3 < bpp) { 10379 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n", 10380 bpp, connector->base.display_info.bpc*3); 10381 pipe_config->pipe_bpp = connector->base.display_info.bpc*3; 10382 } 10383 10384 /* Clamp bpp to 8 on screens without EDID 1.4 */ 10385 if (connector->base.display_info.bpc == 0 && bpp > 24) { 10386 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", 10387 bpp); 10388 pipe_config->pipe_bpp = 24; 10389 } 10390 } 10391 10392 static int 10393 compute_baseline_pipe_bpp(struct intel_crtc *crtc, 10394 struct drm_framebuffer *fb, 10395 struct intel_crtc_state *pipe_config) 10396 { 10397 struct drm_device *dev = crtc->base.dev; 10398 struct drm_atomic_state *state; 10399 struct intel_connector *connector; 10400 int bpp, i; 10401 10402 switch (fb->pixel_format) { 10403 case DRM_FORMAT_C8: 10404 bpp = 8*3; 
/* since we go through a colormap */ 10405 break; 10406 case DRM_FORMAT_XRGB1555: 10407 case DRM_FORMAT_ARGB1555: 10408 /* checked in intel_framebuffer_init already */ 10409 if (WARN_ON(INTEL_INFO(dev)->gen > 3)) 10410 return -EINVAL; 10411 case DRM_FORMAT_RGB565: 10412 bpp = 6*3; /* min is 18bpp */ 10413 break; 10414 case DRM_FORMAT_XBGR8888: 10415 case DRM_FORMAT_ABGR8888: 10416 /* checked in intel_framebuffer_init already */ 10417 if (WARN_ON(INTEL_INFO(dev)->gen < 4)) 10418 return -EINVAL; 10419 case DRM_FORMAT_XRGB8888: 10420 case DRM_FORMAT_ARGB8888: 10421 bpp = 8*3; 10422 break; 10423 case DRM_FORMAT_XRGB2101010: 10424 case DRM_FORMAT_ARGB2101010: 10425 case DRM_FORMAT_XBGR2101010: 10426 case DRM_FORMAT_ABGR2101010: 10427 /* checked in intel_framebuffer_init already */ 10428 if (WARN_ON(INTEL_INFO(dev)->gen < 4)) 10429 return -EINVAL; 10430 bpp = 10*3; 10431 break; 10432 /* TODO: gen4+ supports 16 bpc floating point, too. */ 10433 default: 10434 DRM_DEBUG_KMS("unsupported depth\n"); 10435 return -EINVAL; 10436 } 10437 10438 pipe_config->pipe_bpp = bpp; 10439 10440 state = pipe_config->base.state; 10441 10442 /* Clamp display bpp to EDID value */ 10443 for (i = 0; i < state->num_connector; i++) { 10444 if (!state->connectors[i]) 10445 continue; 10446 10447 connector = to_intel_connector(state->connectors[i]); 10448 if (state->connector_states[i]->crtc != &crtc->base) 10449 continue; 10450 10451 connected_sink_compute_bpp(connector, pipe_config); 10452 } 10453 10454 return bpp; 10455 } 10456 10457 static void intel_dump_crtc_timings(const struct drm_display_mode *mode) 10458 { 10459 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, " 10460 "type: 0x%x flags: 0x%x\n", 10461 mode->crtc_clock, 10462 mode->crtc_hdisplay, mode->crtc_hsync_start, 10463 mode->crtc_hsync_end, mode->crtc_htotal, 10464 mode->crtc_vdisplay, mode->crtc_vsync_start, 10465 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags); 10466 } 10467 10468 static void 
/* Dump the whole software pipe config state for debugging, prefixed with
 * @context (e.g. "[modeset]", "[hw state]"). */
intel_dump_pipe_config(struct intel_crtc *crtc,
		       struct intel_crtc_state *pipe_config,
		       const char *context)
{
	DRM_DEBUG_KMS("[CRTC:%d]%s config for pipe %c\n", crtc->base.base.id,
		      context, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %c\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* second set of m/n values (used when DRRS switches refresh rates) */
	DRM_DEBUG_KMS("dp: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
}

/* Can encoders @a and @b share the same crtc?  An encoder is trivially
 * cloneable with itself. */
static bool encoders_cloneable(const struct intel_encoder *a,
			       const struct intel_encoder *b)
{
	/* masks could be asymmetric, so check both ways */
	return a == b || (a->cloneable & (1 << b->type) &&
			  b->cloneable & (1 << a->type));
}

/* Check that @encoder can be cloned with every encoder already staged
 * (->new_crtc) on @crtc. */
static bool check_single_encoder_cloning(struct intel_crtc *crtc,
					 struct intel_encoder *encoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *source_encoder;

	for_each_intel_encoder(dev, source_encoder) {
		if (source_encoder->new_crtc != crtc)
			continue;

		if (!encoders_cloneable(encoder, source_encoder))
			return false;
	}

	return true;
}

/* Verify that all encoders staged on @crtc are mutually cloneable. */
static bool check_encoder_cloning(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_intel_encoder(dev, encoder) {
		if (encoder->new_crtc != crtc)
			continue;

		if (!check_single_encoder_cloning(crtc, encoder))
			return false;
	}

	return true;
}

/* Reject staged configurations that route the same digital port to more
 * than one active output.  Returns false on conflict. */
static bool check_digital_port_conflicts(struct drm_device *dev)
{
	struct intel_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	for_each_intel_connector(dev, connector) {
		struct intel_encoder *encoder = connector->new_encoder;

		if (!encoder)
			continue;

		WARN_ON(!encoder->new_crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fallthrough - UNKNOWN on DDI is a digital port too */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fallthrough */
		default:
			break;
		}
	}

	return true;
}

/* Zero the intel-specific part of @crtc_state while preserving the
 * embedded drm core state. */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;

	/* Clear only the intel specific part of the crtc state */
	tmp_state = crtc_state->base;
	memset(crtc_state, 0, sizeof *crtc_state);
	crtc_state->base = tmp_state;
}

/* Compute a complete new pipe config for @crtc from the requested @mode,
 * letting encoders/connectors adjust or veto it.  Returns the atomic crtc
 * state on success, ERR_PTR() on failure. */
static struct intel_crtc_state *
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_display_mode *mode,
			  struct drm_atomic_state *state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *pipe_config;
	int plane_bpp, ret = -EINVAL;
	int i;
	/* one retry is allowed when the crtc asks for a bandwidth retry */
	bool retry = true;

	if (!check_encoder_cloning(to_intel_crtc(crtc))) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return ERR_PTR(-EINVAL);
	}

	if (!check_digital_port_conflicts(dev)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return ERR_PTR(-EINVAL);
	}

	pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
	if (IS_ERR(pipe_config))
		return pipe_config;

	clear_intel_crtc_state(pipe_config);

	pipe_config->base.crtc = crtc;
	drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
	drm_mode_copy(&pipe_config->base.mode, mode);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;
	pipe_config->shared_dpll = DPLL_ID_PRIVATE;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	/* Compute a starting value for pipe_config->pipe_bpp taking the source
	 * plane pixel format and any sink constraints into account. Returns the
	 * source plane bpp so that dithering can be selected on mismatches
	 * after encoders and crtc also have had their say. */
	plane_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					      fb, pipe_config);
	if (plane_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for (i = 0; i < state->num_connector; i++) {
		connector = to_intel_connector(state->connectors[i]);
		if (!connector)
			continue;

		connector_state = state->connector_states[i];
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* only one retry permitted, otherwise we would loop forever */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* dither when the final pipe bpp no longer matches the plane bpp */
	pipe_config->dither = pipe_config->pipe_bpp != plane_bpp;
	DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
		      plane_bpp, pipe_config->pipe_bpp, pipe_config->dither);

	return pipe_config;
fail:
	return ERR_PTR(ret);
}

/* Computes which crtcs are affected and sets the relevant bits in the mask. For
 * simplicity we use the crtc's pipe number (because it's easier to obtain). */
static void
intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
			     unsigned *prepare_pipes, unsigned *disable_pipes)
{
	struct intel_crtc *intel_crtc;
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	struct drm_crtc *tmp_crtc;

	*disable_pipes = *modeset_pipes = *prepare_pipes = 0;

	/* Check which crtcs have changed outputs connected to them, these need
	 * to be part of the prepare_pipes mask. We don't (yet) support global
	 * modeset across multiple crtcs, so modeset_pipes will only have one
	 * bit set at most. */
	for_each_intel_connector(dev, connector) {
		if (connector->base.encoder == &connector->new_encoder->base)
			continue;

		if (connector->base.encoder) {
			tmp_crtc = connector->base.encoder->crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (connector->new_encoder)
			*prepare_pipes |=
				1 << connector->new_encoder->new_crtc->pipe;
	}

	/* Same for encoders whose crtc assignment changed. */
	for_each_intel_encoder(dev, encoder) {
		if (encoder->base.crtc == &encoder->new_crtc->base)
			continue;

		if (encoder->base.crtc) {
			tmp_crtc = encoder->base.crtc;

			*prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
		}

		if (encoder->new_crtc)
			*prepare_pipes |= 1 << encoder->new_crtc->pipe;
	}

	/* Check for pipes that will be enabled/disabled ... */
	for_each_intel_crtc(dev, intel_crtc) {
		if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
			continue;

		if (!intel_crtc->new_enabled)
			*disable_pipes |= 1 << intel_crtc->pipe;
		else
			*prepare_pipes |= 1 << intel_crtc->pipe;
	}


	/* set_mode is also used to update properties on live display pipes. */
	intel_crtc = to_intel_crtc(crtc);
	if (intel_crtc->new_enabled)
		*prepare_pipes |= 1 << intel_crtc->pipe;

	/*
	 * For simplicity do a full modeset on any pipe where the output routing
	 * changed. We could be more clever, but that would require us to be
	 * more careful with calling the relevant encoder->mode_set functions.
	 */
	if (*prepare_pipes)
		*modeset_pipes = *prepare_pipes;

	/* ... and mask these out. */
	*modeset_pipes &= ~(*disable_pipes);
	*prepare_pipes &= ~(*disable_pipes);

	/*
	 * HACK: We don't (yet) fully support global modesets. intel_set_config
	 * obeys this rule, but the modeset restore mode of
	 * intel_modeset_setup_hw_state does not.
	 */
	*modeset_pipes &= 1 << intel_crtc->pipe;
	*prepare_pipes &= 1 << intel_crtc->pipe;

	DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
		      *modeset_pipes, *prepare_pipes, *disable_pipes);
}

/* Is any encoder currently routed to @crtc? */
static bool intel_crtc_in_use(struct drm_crtc *crtc)
{
	struct drm_encoder *encoder;
	struct drm_device *dev = crtc->dev;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
		if (encoder->crtc == crtc)
			return true;

	return false;
}

/* Commit the staged output routing to the sw tracking state and update
 * connector dpms/active bookkeeping for the pipes in @prepare_pipes. */
static void
intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	struct intel_crtc *intel_crtc;
	struct drm_connector *connector;

	intel_shared_dpll_commit(dev_priv);

	/* Encoders on pipes being modeset are no longer active. */
	for_each_intel_encoder(dev, intel_encoder) {
		if (!intel_encoder->base.crtc)
			continue;

		intel_crtc = to_intel_crtc(intel_encoder->base.crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe))
			intel_encoder->connectors_active = false;
	}

	intel_modeset_commit_output_state(dev);

	/* Double check state. */
	for_each_intel_crtc(dev, intel_crtc) {
		WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
		WARN_ON(intel_crtc->new_config &&
			intel_crtc->new_config != intel_crtc->config);
		WARN_ON(intel_crtc->base.state->enable != !!intel_crtc->new_config);
	}

	/* Force DPMS on for connectors whose pipe was (re)enabled. */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		if (!connector->encoder || !connector->encoder->crtc)
			continue;

		intel_crtc = to_intel_crtc(connector->encoder->crtc);

		if (prepare_pipes & (1 << intel_crtc->pipe)) {
			struct drm_property *dpms_property =
				dev->mode_config.dpms_property;

			connector->dpms = DRM_MODE_DPMS_ON;
			drm_object_property_set_value(&connector->base,
						      dpms_property,
						      DRM_MODE_DPMS_ON);

			intel_encoder = to_intel_encoder(connector->encoder);
			intel_encoder->connectors_active = true;
		}
	}

}

/* Return true when @clock1 and @clock2 agree to within roughly 5%
 * (and are both nonzero), or are exactly equal. */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int diff;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	diff = abs(clock1 - clock2);

	if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
		return true;

	return false;
}

/* Iterate over the crtcs whose pipe bit is set in @mask. */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		if (mask & (1 <<(intel_crtc)->pipe))

/* Compare the sw-computed pipe config against the one read back from hw;
 * returns false (after logging the first mismatch) when they disagree. */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config)
{
/* compare a field, logging in hex on mismatch */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* compare a field, logging in decimal on mismatch */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_I_ALT(name, alt_name) \
	if ((current_config->name != pipe_config->name) && \
		(current_config->alt_name != pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i or %i, found %i)\n", \
			  current_config->name, \
			  current_config->alt_name, \
			  pipe_config->name); \
		return false; \
	}

/* compare only the bits selected by @mask */
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		DRM_ERROR("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		return false; \
	}

/* clocks only need to agree within the fuzzy tolerance */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		DRM_ERROR("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		return false; \
	}

/* true when either config carries @quirk */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_m);
	PIPE_CONF_CHECK_I(fdi_m_n.gmch_n);
	PIPE_CONF_CHECK_I(fdi_m_n.link_m);
	PIPE_CONF_CHECK_I(fdi_m_n.link_n);
	PIPE_CONF_CHECK_I(fdi_m_n.tu);

	PIPE_CONF_CHECK_I(has_dp_encoder);

	if (INTEL_INFO(dev)->gen < 8) {
		PIPE_CONF_CHECK_I(dp_m_n.gmch_m);
		PIPE_CONF_CHECK_I(dp_m_n.gmch_n);
		PIPE_CONF_CHECK_I(dp_m_n.link_m);
		PIPE_CONF_CHECK_I(dp_m_n.link_n);
		PIPE_CONF_CHECK_I(dp_m_n.tu);

		if (current_config->has_drrs) {
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.gmch_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_m);
			PIPE_CONF_CHECK_I(dp_m2_n2.link_n);
			PIPE_CONF_CHECK_I(dp_m2_n2.tu);
		}
	} else {
		/* BDW+ has a single register set; hw may match either m/n set */
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_m, dp_m2_n2.gmch_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.gmch_n, dp_m2_n2.gmch_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_m, dp_m2_n2.link_m);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.link_n, dp_m2_n2.link_n);
		PIPE_CONF_CHECK_I_ALT(dp_m_n.tu, dp_m2_n2.tu);
	}

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_I(pipe_src_w);
	PIPE_CONF_CHECK_I(pipe_src_h);

	/*
	 * FIXME: BIOS likes to set up a cloned config with lvds+external
	 * screen. Since we don't yet re-compute the pipe config when moving
	 * just the lvds port away to another pipe the sw tracking won't match.
	 *
	 * Proper atomic modesets with recomputed global state will fix this.
	 * Until then just don't check gmch state for inherited modes.
	 */
	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
		PIPE_CONF_CHECK_I(gmch_pfit.control);
		/* pfit ratios are autocomputed by the hw on gen4+ */
		if (INTEL_INFO(dev)->gen < 4)
			PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
		PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
	}

	PIPE_CONF_CHECK_I(pch_pfit.enabled);
	if (current_config->pch_pfit.enabled) {
		PIPE_CONF_CHECK_I(pch_pfit.pos);
		PIPE_CONF_CHECK_I(pch_pfit.size);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_I(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_I_ALT
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK

	return true;
}

/* Cross-check the SKL+ (gen9) DDB allocation in hw against the sw copy,
 * logging any per-plane or cursor mismatch.  No-op before gen9. */
static void check_wm_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct intel_crtc *intel_crtc;
	int plane;

	if (INTEL_INFO(dev)->gen < 9)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	for_each_intel_crtc(dev, intel_crtc) {
		struct skl_ddb_entry *hw_entry, *sw_entry;
		const enum i915_pipe pipe = intel_crtc->pipe;

		if (!intel_crtc->active)
			continue;

		/* planes */
		for_each_plane(dev_priv, pipe, plane) {
			hw_entry = &hw_ddb.plane[pipe][plane];
			sw_entry = &sw_ddb->plane[pipe][plane];

			if (skl_ddb_entry_equal(hw_entry, sw_entry))
				continue;

			DRM_ERROR("mismatch in DDB state pipe %c plane %d "
				  "(expected (%u,%u), found (%u,%u))\n",
				  pipe_name(pipe), plane + 1,
				  sw_entry->start, sw_entry->end,
				  hw_entry->start, hw_entry->end);
		}

		/* cursor */
		hw_entry = &hw_ddb.cursor[pipe];
		sw_entry = &sw_ddb->cursor[pipe];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}

/* Verify connector sw state (and hw state via the ->get_hw_state
 * callbacks) for every connector. */
static void
check_connector_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* This also checks the encoder/connector hw state with the
		 * ->get_hw_state callbacks. */
		intel_connector_check_state(connector);

		I915_STATE_WARN(&connector->new_encoder->base != connector->base.encoder,
		     "connector's staged encoder doesn't match current encoder\n");
	}
}

/* Cross-check each encoder's sw tracking (enabled/active/pipe) against
 * the connector list and the hw state read back from the encoder. */
static void
check_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		bool active = false;
		enum i915_pipe pipe, tracked_pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		I915_STATE_WARN(&encoder->new_crtc->base != encoder->base.crtc,
		     "encoder's stage crtc doesn't match current crtc\n");
		I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc,
		     "encoder's active_connectors set, but no crtc\n");

		/* derive enabled/active from the connectors routed to us */
		for_each_intel_connector(dev, connector) {
			if (connector->base.encoder != &encoder->base)
				continue;
			enabled = true;
			if (connector->base.dpms != DRM_MODE_DPMS_OFF)
				active = true;
		}
		/*
		 * for MST connectors if we unplug the connector is gone
		 * away but the encoder is still connected to a crtc
		 * until a modeset happens in response to the hotplug.
		 */
		if (!enabled && encoder->base.encoder_type == DRM_MODE_ENCODER_DPMST)
			continue;

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);
		I915_STATE_WARN(active && !encoder->base.crtc,
		     "active encoder with no crtc\n");

		I915_STATE_WARN(encoder->connectors_active != active,
		     "encoder's computed active state doesn't match tracked active state "
		     "(expected %i, found %i)\n", active, encoder->connectors_active);

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != encoder->connectors_active,
		     "encoder's hw state doesn't match sw tracking "
		     "(expected %i, found %i)\n",
		     encoder->connectors_active, active);

		if (!encoder->base.crtc)
			continue;

		tracked_pipe = to_intel_crtc(encoder->base.crtc)->pipe;
		I915_STATE_WARN(active && pipe != tracked_pipe,
		     "active encoder's pipe doesn't match"
		     "(expected %i, found %i)\n",
		     tracked_pipe, pipe);

	}
}

/* Cross-check each crtc's sw tracking against its encoders and the pipe
 * config read back from hw; dumps both configs when they diverge. */
static void
check_crtc_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_crtc_state pipe_config;

	for_each_intel_crtc(dev, crtc) {
		bool enabled = false;
		bool active = false;

		memset(&pipe_config, 0, sizeof(pipe_config));

		DRM_DEBUG_KMS("[CRTC:%d]\n",
			      crtc->base.base.id);

		I915_STATE_WARN(crtc->active && !crtc->base.state->enable,
			"active crtc, but not enabled in sw tracking\n");

		for_each_intel_encoder(dev, encoder) {
			if (encoder->base.crtc != &crtc->base)
				continue;
			enabled = true;
			if (encoder->connectors_active)
				active = true;
		}

		I915_STATE_WARN(active != crtc->active,
			"crtc's computed active state doesn't match tracked active state "
			"(expected %i, found %i)\n", active, crtc->active);
		I915_STATE_WARN(enabled != crtc->base.state->enable,
			"crtc's computed enabled state doesn't match tracked enabled state "
			"(expected %i, found %i)\n", enabled,
			crtc->base.state->enable);

		active = dev_priv->display.get_pipe_config(crtc,
							   &pipe_config);

		/* hw state is inconsistent with the pipe quirk */
		if ((crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
		    (crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
			active = crtc->active;

		for_each_intel_encoder(dev, encoder) {
			enum i915_pipe pipe;
			if (encoder->base.crtc != &crtc->base)
				continue;
			if (encoder->get_hw_state(encoder, &pipe))
				encoder->get_config(encoder, &pipe_config);
		}

		I915_STATE_WARN(crtc->active != active,
			"crtc active state doesn't match with hw state "
			"(expected %i, found %i)\n", crtc->active, active);

		if (active &&
		    !intel_pipe_config_compare(dev, crtc->config, &pipe_config)) {
			I915_STATE_WARN(1, "pipe state doesn't match!\n");
			intel_dump_pipe_config(crtc, &pipe_config,
					       "[hw state]");
			intel_dump_pipe_config(crtc, crtc->config,
					       "[sw state]");
		}
	}
}

/* Verify shared DPLL refcounts, on/active tracking and hw register state
 * against the crtcs that reference each pll. */
static void
check_shared_dpll_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	struct intel_dpll_hw_state dpll_hw_state;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
		int enabled_crtcs = 0, active_crtcs = 0;
		bool active;

		memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

		DRM_DEBUG_KMS("%s\n", pll->name);

		active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);

		I915_STATE_WARN(pll->active > hweight32(pll->config.crtc_mask),
		     "more active pll users than references: %i vs %i\n",
		     pll->active, hweight32(pll->config.crtc_mask));
		I915_STATE_WARN(pll->active && !pll->on,
		     "pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active,
		     "pll in on but not on in use in sw tracking\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);

		/* count the crtcs that reference / actively use this pll */
		for_each_intel_crtc(dev, crtc) {
			if (crtc->base.state->enable && intel_crtc_to_shared_dpll(crtc) == pll)
				enabled_crtcs++;
			if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll)
				active_crtcs++;
		}
		I915_STATE_WARN(pll->active != active_crtcs,
		     "pll active crtcs mismatch (expected %i, found %i)\n",
		     pll->active, active_crtcs);
		I915_STATE_WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
		     "pll enabled crtcs mismatch (expected %i, found %i)\n",
		     hweight32(pll->config.crtc_mask), enabled_crtcs);

		I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
				       sizeof(dpll_hw_state)),
		     "pll hw state mismatch\n");
	}
}

/* Run the full sw/hw consistency check suite after a modeset. */
void
intel_modeset_check_state(struct drm_device *dev)
{
	check_wm_state(dev);
	check_connector_state(dev);
	check_encoder_state(dev);
	check_crtc_state(dev);
	check_shared_dpll_state(dev);
}

void ironlake_check_encoder_dotclock(const struct intel_crtc_state *pipe_config,
				     int dotclock)
{
	/*
	 * FDI already provided one idea for the dotclock.
	 * Yell if the encoder disagrees.
	 */
	WARN(!intel_fuzzy_clock_check(pipe_config->base.adjusted_mode.crtc_clock, dotclock),
	     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
	     pipe_config->base.adjusted_mode.crtc_clock, dotclock);
}

/* Compute the per-crtc offset between the hw scanline counter and the
 * logical scanline, which varies by platform generation and output type. */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = mode->crtc_vtotal;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}

/* Compute the affected pipe masks and the new pipe config for a legacy
 * set_mode request.  Returns the crtc's atomic state or ERR_PTR(). */
static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_framebuffer *fb,
			     struct drm_atomic_state *state,
			     unsigned *modeset_pipes,
			     unsigned *prepare_pipes,
			     unsigned *disable_pipes)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc_state *pipe_config = NULL;
	struct intel_crtc *intel_crtc;
	int ret = 0;

	ret = drm_atomic_add_affected_connectors(state, crtc);
	if (ret)
		return ERR_PTR(ret);

	intel_modeset_affected_pipes(crtc, modeset_pipes,
				     prepare_pipes, disable_pipes);

	/* mark every pipe being disabled as such in its atomic state */
	for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return pipe_config;

		pipe_config->base.enable = false;
	}

	/*
	 * Note this needs changes when we start tracking multiple modes
	 * and crtcs.  At that point we'll need to compute the whole config
	 * (i.e. one pipe_config for each crtc) rather than just the one
	 * for this crtc.
	 */
	for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
		/* FIXME: For now we still expect modeset_pipes has at most
		 * one bit set.
*/ 11459 if (WARN_ON(&intel_crtc->base != crtc)) 11460 continue; 11461 11462 pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state); 11463 if (IS_ERR(pipe_config)) 11464 return pipe_config; 11465 11466 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 11467 "[modeset]"); 11468 } 11469 11470 return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));; 11471 } 11472 11473 static int __intel_set_mode_setup_plls(struct drm_device *dev, 11474 unsigned modeset_pipes, 11475 unsigned disable_pipes) 11476 { 11477 struct drm_i915_private *dev_priv = to_i915(dev); 11478 unsigned clear_pipes = modeset_pipes | disable_pipes; 11479 struct intel_crtc *intel_crtc; 11480 int ret = 0; 11481 11482 if (!dev_priv->display.crtc_compute_clock) 11483 return 0; 11484 11485 ret = intel_shared_dpll_start_config(dev_priv, clear_pipes); 11486 if (ret) 11487 goto done; 11488 11489 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 11490 struct intel_crtc_state *state = intel_crtc->new_config; 11491 ret = dev_priv->display.crtc_compute_clock(intel_crtc, 11492 state); 11493 if (ret) { 11494 intel_shared_dpll_abort_config(dev_priv); 11495 goto done; 11496 } 11497 } 11498 11499 done: 11500 return ret; 11501 } 11502 11503 static int __intel_set_mode(struct drm_crtc *crtc, 11504 struct drm_display_mode *mode, 11505 int x, int y, struct drm_framebuffer *fb, 11506 struct intel_crtc_state *pipe_config, 11507 unsigned modeset_pipes, 11508 unsigned prepare_pipes, 11509 unsigned disable_pipes) 11510 { 11511 struct drm_device *dev = crtc->dev; 11512 struct drm_i915_private *dev_priv = dev->dev_private; 11513 struct drm_display_mode *saved_mode; 11514 struct intel_crtc_state *crtc_state_copy = NULL; 11515 struct intel_crtc *intel_crtc; 11516 int ret = 0; 11517 11518 saved_mode = kmalloc(sizeof(*saved_mode), M_DRM, M_WAITOK); 11519 if (!saved_mode) 11520 return -ENOMEM; 11521 11522 crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), M_DRM, M_WAITOK); 11523 if (!crtc_state_copy) { 
11524 ret = -ENOMEM; 11525 goto done; 11526 } 11527 11528 *saved_mode = crtc->mode; 11529 11530 if (modeset_pipes) 11531 to_intel_crtc(crtc)->new_config = pipe_config; 11532 11533 /* 11534 * See if the config requires any additional preparation, e.g. 11535 * to adjust global state with pipes off. We need to do this 11536 * here so we can get the modeset_pipe updated config for the new 11537 * mode set on this crtc. For other crtcs we need to use the 11538 * adjusted_mode bits in the crtc directly. 11539 */ 11540 if (IS_VALLEYVIEW(dev)) { 11541 valleyview_modeset_global_pipes(dev, &prepare_pipes); 11542 11543 /* may have added more to prepare_pipes than we should */ 11544 prepare_pipes &= ~disable_pipes; 11545 } 11546 11547 ret = __intel_set_mode_setup_plls(dev, modeset_pipes, disable_pipes); 11548 if (ret) 11549 goto done; 11550 11551 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 11552 intel_crtc_disable(&intel_crtc->base); 11553 11554 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11555 if (intel_crtc->base.state->enable) 11556 dev_priv->display.crtc_disable(&intel_crtc->base); 11557 } 11558 11559 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 11560 * to set it here already despite that we pass it down the callchain. 11561 * 11562 * Note we'll need to fix this up when we start tracking multiple 11563 * pipes; here we assume a single modeset_pipe and only track the 11564 * single crtc and mode. 11565 */ 11566 if (modeset_pipes) { 11567 crtc->mode = *mode; 11568 /* mode_set/enable/disable functions rely on a correct pipe 11569 * config. */ 11570 intel_crtc_set_state(to_intel_crtc(crtc), pipe_config); 11571 11572 /* 11573 * Calculate and store various constants which 11574 * are later needed by vblank and swap-completion 11575 * timestamping. They are derived from true hwmode. 
11576 */ 11577 drm_calc_timestamping_constants(crtc, 11578 &pipe_config->base.adjusted_mode); 11579 } 11580 11581 /* Only after disabling all output pipelines that will be changed can we 11582 * update the the output configuration. */ 11583 intel_modeset_update_state(dev, prepare_pipes); 11584 11585 modeset_update_crtc_power_domains(pipe_config->base.state); 11586 11587 /* Set up the DPLL and any encoders state that needs to adjust or depend 11588 * on the DPLL. 11589 */ 11590 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { 11591 struct drm_plane *primary = intel_crtc->base.primary; 11592 int vdisplay, hdisplay; 11593 11594 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay); 11595 ret = primary->funcs->update_plane(primary, &intel_crtc->base, 11596 fb, 0, 0, 11597 hdisplay, vdisplay, 11598 x << 16, y << 16, 11599 hdisplay << 16, vdisplay << 16); 11600 } 11601 11602 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 11603 for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) { 11604 update_scanline_offset(intel_crtc); 11605 11606 dev_priv->display.crtc_enable(&intel_crtc->base); 11607 } 11608 11609 /* FIXME: add subpixel order */ 11610 done: 11611 if (ret && crtc->state->enable) 11612 crtc->mode = *saved_mode; 11613 11614 if (ret == 0 && pipe_config) { 11615 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11616 11617 /* The pipe_config will be freed with the atomic state, so 11618 * make a copy. 
*/ 11619 memcpy(crtc_state_copy, intel_crtc->config, 11620 sizeof *crtc_state_copy); 11621 intel_crtc->config = crtc_state_copy; 11622 intel_crtc->base.state = &crtc_state_copy->base; 11623 11624 if (modeset_pipes) 11625 intel_crtc->new_config = intel_crtc->config; 11626 } else { 11627 kfree(crtc_state_copy); 11628 } 11629 11630 kfree(saved_mode); 11631 return ret; 11632 } 11633 11634 static int intel_set_mode_pipes(struct drm_crtc *crtc, 11635 struct drm_display_mode *mode, 11636 int x, int y, struct drm_framebuffer *fb, 11637 struct intel_crtc_state *pipe_config, 11638 unsigned modeset_pipes, 11639 unsigned prepare_pipes, 11640 unsigned disable_pipes) 11641 { 11642 int ret; 11643 11644 ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes, 11645 prepare_pipes, disable_pipes); 11646 11647 if (ret == 0) 11648 intel_modeset_check_state(crtc->dev); 11649 11650 return ret; 11651 } 11652 11653 static int intel_set_mode(struct drm_crtc *crtc, 11654 struct drm_display_mode *mode, 11655 int x, int y, struct drm_framebuffer *fb, 11656 struct drm_atomic_state *state) 11657 { 11658 struct intel_crtc_state *pipe_config; 11659 unsigned modeset_pipes, prepare_pipes, disable_pipes; 11660 int ret = 0; 11661 11662 pipe_config = intel_modeset_compute_config(crtc, mode, fb, state, 11663 &modeset_pipes, 11664 &prepare_pipes, 11665 &disable_pipes); 11666 11667 if (IS_ERR(pipe_config)) { 11668 ret = PTR_ERR(pipe_config); 11669 goto out; 11670 } 11671 11672 ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, 11673 modeset_pipes, prepare_pipes, 11674 disable_pipes); 11675 if (ret) 11676 goto out; 11677 11678 out: 11679 return ret; 11680 } 11681 11682 void intel_crtc_restore_mode(struct drm_crtc *crtc) 11683 { 11684 struct drm_device *dev = crtc->dev; 11685 struct drm_atomic_state *state; 11686 struct intel_encoder *encoder; 11687 struct intel_connector *connector; 11688 struct drm_connector_state *connector_state; 11689 11690 state = 
drm_atomic_state_alloc(dev); 11691 if (!state) { 11692 DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory", 11693 crtc->base.id); 11694 return; 11695 } 11696 11697 state->acquire_ctx = dev->mode_config.acquire_ctx; 11698 11699 /* The force restore path in the HW readout code relies on the staged 11700 * config still keeping the user requested config while the actual 11701 * state has been overwritten by the configuration read from HW. We 11702 * need to copy the staged config to the atomic state, otherwise the 11703 * mode set will just reapply the state the HW is already in. */ 11704 for_each_intel_encoder(dev, encoder) { 11705 if (&encoder->new_crtc->base != crtc) 11706 continue; 11707 11708 for_each_intel_connector(dev, connector) { 11709 if (connector->new_encoder != encoder) 11710 continue; 11711 11712 connector_state = drm_atomic_get_connector_state(state, &connector->base); 11713 if (IS_ERR(connector_state)) { 11714 DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n", 11715 connector->base.base.id, 11716 connector->base.name, 11717 PTR_ERR(connector_state)); 11718 continue; 11719 } 11720 11721 connector_state->crtc = crtc; 11722 connector_state->best_encoder = &encoder->base; 11723 } 11724 } 11725 11726 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb, 11727 state); 11728 11729 drm_atomic_state_free(state); 11730 } 11731 11732 #undef for_each_intel_crtc_masked 11733 11734 static void intel_set_config_free(struct intel_set_config *config) 11735 { 11736 if (!config) 11737 return; 11738 11739 kfree(config->save_connector_encoders); 11740 kfree(config->save_encoder_crtcs); 11741 kfree(config->save_crtc_enabled); 11742 kfree(config); 11743 } 11744 11745 static int intel_set_config_save_state(struct drm_device *dev, 11746 struct intel_set_config *config) 11747 { 11748 struct drm_crtc *crtc; 11749 struct drm_encoder *encoder; 11750 struct drm_connector *connector; 11751 int count; 11752 11753 config->save_crtc_enabled = 
11754 kcalloc(dev->mode_config.num_crtc, 11755 sizeof(bool), GFP_KERNEL); 11756 if (!config->save_crtc_enabled) 11757 return -ENOMEM; 11758 11759 config->save_encoder_crtcs = 11760 kcalloc(dev->mode_config.num_encoder, 11761 sizeof(struct drm_crtc *), GFP_KERNEL); 11762 if (!config->save_encoder_crtcs) 11763 return -ENOMEM; 11764 11765 config->save_connector_encoders = 11766 kcalloc(dev->mode_config.num_connector, 11767 sizeof(struct drm_encoder *), GFP_KERNEL); 11768 if (!config->save_connector_encoders) 11769 return -ENOMEM; 11770 11771 /* Copy data. Note that driver private data is not affected. 11772 * Should anything bad happen only the expected state is 11773 * restored, not the drivers personal bookkeeping. 11774 */ 11775 count = 0; 11776 for_each_crtc(dev, crtc) { 11777 config->save_crtc_enabled[count++] = crtc->state->enable; 11778 } 11779 11780 count = 0; 11781 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 11782 config->save_encoder_crtcs[count++] = encoder->crtc; 11783 } 11784 11785 count = 0; 11786 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 11787 config->save_connector_encoders[count++] = connector->encoder; 11788 } 11789 11790 return 0; 11791 } 11792 11793 static void intel_set_config_restore_state(struct drm_device *dev, 11794 struct intel_set_config *config) 11795 { 11796 struct intel_crtc *crtc; 11797 struct intel_encoder *encoder; 11798 struct intel_connector *connector; 11799 int count; 11800 11801 count = 0; 11802 for_each_intel_crtc(dev, crtc) { 11803 crtc->new_enabled = config->save_crtc_enabled[count++]; 11804 11805 if (crtc->new_enabled) 11806 crtc->new_config = crtc->config; 11807 else 11808 crtc->new_config = NULL; 11809 } 11810 11811 count = 0; 11812 for_each_intel_encoder(dev, encoder) { 11813 encoder->new_crtc = 11814 to_intel_crtc(config->save_encoder_crtcs[count++]); 11815 } 11816 11817 count = 0; 11818 for_each_intel_connector(dev, connector) { 11819 connector->new_encoder = 11820 
to_intel_encoder(config->save_connector_encoders[count++]); 11821 } 11822 } 11823 11824 static bool 11825 is_crtc_connector_off(struct drm_mode_set *set) 11826 { 11827 int i; 11828 11829 if (set->num_connectors == 0) 11830 return false; 11831 11832 if (WARN_ON(set->connectors == NULL)) 11833 return false; 11834 11835 for (i = 0; i < set->num_connectors; i++) 11836 if (set->connectors[i]->encoder && 11837 set->connectors[i]->encoder->crtc == set->crtc && 11838 set->connectors[i]->dpms != DRM_MODE_DPMS_ON) 11839 return true; 11840 11841 return false; 11842 } 11843 11844 static void 11845 intel_set_config_compute_mode_changes(struct drm_mode_set *set, 11846 struct intel_set_config *config) 11847 { 11848 11849 /* We should be able to check here if the fb has the same properties 11850 * and then just flip_or_move it */ 11851 if (is_crtc_connector_off(set)) { 11852 config->mode_changed = true; 11853 } else if (set->crtc->primary->fb != set->fb) { 11854 /* 11855 * If we have no fb, we can only flip as long as the crtc is 11856 * active, otherwise we need a full mode set. The crtc may 11857 * be active if we've only disabled the primary plane, or 11858 * in fastboot situations. 
11859 */ 11860 if (set->crtc->primary->fb == NULL) { 11861 struct intel_crtc *intel_crtc = 11862 to_intel_crtc(set->crtc); 11863 11864 if (intel_crtc->active) { 11865 DRM_DEBUG_KMS("crtc has no fb, will flip\n"); 11866 config->fb_changed = true; 11867 } else { 11868 DRM_DEBUG_KMS("inactive crtc, full mode set\n"); 11869 config->mode_changed = true; 11870 } 11871 } else if (set->fb == NULL) { 11872 config->mode_changed = true; 11873 } else if (set->fb->pixel_format != 11874 set->crtc->primary->fb->pixel_format) { 11875 config->mode_changed = true; 11876 } else { 11877 config->fb_changed = true; 11878 } 11879 } 11880 11881 if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y)) 11882 config->fb_changed = true; 11883 11884 if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { 11885 DRM_DEBUG_KMS("modes are different, full mode set\n"); 11886 drm_mode_debug_printmodeline(&set->crtc->mode); 11887 drm_mode_debug_printmodeline(set->mode); 11888 config->mode_changed = true; 11889 } 11890 11891 DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n", 11892 set->crtc->base.id, config->mode_changed, config->fb_changed); 11893 } 11894 11895 static int 11896 intel_modeset_stage_output_state(struct drm_device *dev, 11897 struct drm_mode_set *set, 11898 struct intel_set_config *config, 11899 struct drm_atomic_state *state) 11900 { 11901 struct intel_connector *connector; 11902 struct drm_connector_state *connector_state; 11903 struct intel_encoder *encoder; 11904 struct intel_crtc *crtc; 11905 int ro; 11906 11907 /* The upper layers ensure that we either disable a crtc or have a list 11908 * of connectors. For paranoia, double-check this. */ 11909 WARN_ON(!set->fb && (set->num_connectors != 0)); 11910 WARN_ON(set->fb && (set->num_connectors == 0)); 11911 11912 for_each_intel_connector(dev, connector) { 11913 /* Otherwise traverse passed in connector list and get encoders 11914 * for them. 
*/ 11915 for (ro = 0; ro < set->num_connectors; ro++) { 11916 if (set->connectors[ro] == &connector->base) { 11917 connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe); 11918 break; 11919 } 11920 } 11921 11922 /* If we disable the crtc, disable all its connectors. Also, if 11923 * the connector is on the changing crtc but not on the new 11924 * connector list, disable it. */ 11925 if ((!set->fb || ro == set->num_connectors) && 11926 connector->base.encoder && 11927 connector->base.encoder->crtc == set->crtc) { 11928 connector->new_encoder = NULL; 11929 11930 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n", 11931 connector->base.base.id, 11932 connector->base.name); 11933 } 11934 11935 11936 if (&connector->new_encoder->base != connector->base.encoder) { 11937 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n", 11938 connector->base.base.id, 11939 connector->base.name); 11940 config->mode_changed = true; 11941 } 11942 } 11943 /* connector->new_encoder is now updated for all connectors. */ 11944 11945 /* Update crtc of enabled connectors. 
*/ 11946 for_each_intel_connector(dev, connector) { 11947 struct drm_crtc *new_crtc; 11948 11949 if (!connector->new_encoder) 11950 continue; 11951 11952 new_crtc = connector->new_encoder->base.crtc; 11953 11954 for (ro = 0; ro < set->num_connectors; ro++) { 11955 if (set->connectors[ro] == &connector->base) 11956 new_crtc = set->crtc; 11957 } 11958 11959 /* Make sure the new CRTC will work with the encoder */ 11960 if (!drm_encoder_crtc_ok(&connector->new_encoder->base, 11961 new_crtc)) { 11962 return -EINVAL; 11963 } 11964 connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); 11965 11966 connector_state = 11967 drm_atomic_get_connector_state(state, &connector->base); 11968 if (IS_ERR(connector_state)) 11969 return PTR_ERR(connector_state); 11970 11971 connector_state->crtc = new_crtc; 11972 connector_state->best_encoder = &connector->new_encoder->base; 11973 11974 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", 11975 connector->base.base.id, 11976 connector->base.name, 11977 new_crtc->base.id); 11978 } 11979 11980 /* Check for any encoders that needs to be disabled. */ 11981 for_each_intel_encoder(dev, encoder) { 11982 int num_connectors = 0; 11983 for_each_intel_connector(dev, connector) { 11984 if (connector->new_encoder == encoder) { 11985 WARN_ON(!connector->new_encoder->new_crtc); 11986 num_connectors++; 11987 } 11988 } 11989 11990 if (num_connectors == 0) 11991 encoder->new_crtc = NULL; 11992 else if (num_connectors > 1) 11993 return -EINVAL; 11994 11995 /* Only now check for crtc changes so we don't miss encoders 11996 * that will be disabled. */ 11997 if (&encoder->new_crtc->base != encoder->base.crtc) { 11998 DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n", 11999 encoder->base.base.id, 12000 encoder->base.name); 12001 config->mode_changed = true; 12002 } 12003 } 12004 /* Now we've also updated encoder->new_crtc for all encoders. 
*/ 12005 for_each_intel_connector(dev, connector) { 12006 connector_state = 12007 drm_atomic_get_connector_state(state, &connector->base); 12008 if (IS_ERR(connector_state)) 12009 return PTR_ERR(connector_state); 12010 12011 if (connector->new_encoder) { 12012 if (connector->new_encoder != connector->encoder) 12013 connector->encoder = connector->new_encoder; 12014 } else { 12015 connector_state->crtc = NULL; 12016 } 12017 } 12018 for_each_intel_crtc(dev, crtc) { 12019 crtc->new_enabled = false; 12020 12021 for_each_intel_encoder(dev, encoder) { 12022 if (encoder->new_crtc == crtc) { 12023 crtc->new_enabled = true; 12024 break; 12025 } 12026 } 12027 12028 if (crtc->new_enabled != crtc->base.state->enable) { 12029 DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n", 12030 crtc->base.base.id, 12031 crtc->new_enabled ? "en" : "dis"); 12032 config->mode_changed = true; 12033 } 12034 12035 if (crtc->new_enabled) 12036 crtc->new_config = crtc->config; 12037 else 12038 crtc->new_config = NULL; 12039 } 12040 12041 return 0; 12042 } 12043 12044 static void disable_crtc_nofb(struct intel_crtc *crtc) 12045 { 12046 struct drm_device *dev = crtc->base.dev; 12047 struct intel_encoder *encoder; 12048 struct intel_connector *connector; 12049 12050 DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", 12051 pipe_name(crtc->pipe)); 12052 12053 for_each_intel_connector(dev, connector) { 12054 if (connector->new_encoder && 12055 connector->new_encoder->new_crtc == crtc) 12056 connector->new_encoder = NULL; 12057 } 12058 12059 for_each_intel_encoder(dev, encoder) { 12060 if (encoder->new_crtc == crtc) 12061 encoder->new_crtc = NULL; 12062 } 12063 12064 crtc->new_enabled = false; 12065 crtc->new_config = NULL; 12066 } 12067 12068 static int intel_crtc_set_config(struct drm_mode_set *set) 12069 { 12070 struct drm_device *dev; 12071 struct drm_mode_set save_set; 12072 struct drm_atomic_state *state = NULL; 12073 struct intel_set_config *config; 12074 struct 
intel_crtc_state *pipe_config; 12075 unsigned modeset_pipes, prepare_pipes, disable_pipes; 12076 int ret; 12077 12078 BUG_ON(!set); 12079 BUG_ON(!set->crtc); 12080 BUG_ON(!set->crtc->helper_private); 12081 12082 /* Enforce sane interface api - has been abused by the fb helper. */ 12083 BUG_ON(!set->mode && set->fb); 12084 BUG_ON(set->fb && set->num_connectors == 0); 12085 12086 if (set->fb) { 12087 DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", 12088 set->crtc->base.id, set->fb->base.id, 12089 (int)set->num_connectors, set->x, set->y); 12090 } else { 12091 DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id); 12092 } 12093 12094 dev = set->crtc->dev; 12095 12096 ret = -ENOMEM; 12097 config = kzalloc(sizeof(*config), GFP_KERNEL); 12098 if (!config) 12099 goto out_config; 12100 12101 ret = intel_set_config_save_state(dev, config); 12102 if (ret) 12103 goto out_config; 12104 12105 save_set.crtc = set->crtc; 12106 save_set.mode = &set->crtc->mode; 12107 save_set.x = set->crtc->x; 12108 save_set.y = set->crtc->y; 12109 save_set.fb = set->crtc->primary->fb; 12110 12111 /* Compute whether we need a full modeset, only an fb base update or no 12112 * change at all. In the future we might also check whether only the 12113 * mode changed, e.g. for LVDS where we only change the panel fitter in 12114 * such cases. 
*/ 12115 intel_set_config_compute_mode_changes(set, config); 12116 12117 state = drm_atomic_state_alloc(dev); 12118 if (!state) { 12119 ret = -ENOMEM; 12120 goto out_config; 12121 } 12122 12123 state->acquire_ctx = dev->mode_config.acquire_ctx; 12124 12125 ret = intel_modeset_stage_output_state(dev, set, config, state); 12126 if (ret) 12127 goto fail; 12128 12129 pipe_config = intel_modeset_compute_config(set->crtc, set->mode, 12130 set->fb, state, 12131 &modeset_pipes, 12132 &prepare_pipes, 12133 &disable_pipes); 12134 if (IS_ERR(pipe_config)) { 12135 ret = PTR_ERR(pipe_config); 12136 goto fail; 12137 } else if (pipe_config) { 12138 if (pipe_config->has_audio != 12139 to_intel_crtc(set->crtc)->config->has_audio) 12140 config->mode_changed = true; 12141 12142 /* 12143 * Note we have an issue here with infoframes: current code 12144 * only updates them on the full mode set path per hw 12145 * requirements. So here we should be checking for any 12146 * required changes and forcing a mode set. 12147 */ 12148 } 12149 12150 intel_update_pipe_size(to_intel_crtc(set->crtc)); 12151 12152 if (config->mode_changed) { 12153 ret = intel_set_mode_pipes(set->crtc, set->mode, 12154 set->x, set->y, set->fb, pipe_config, 12155 modeset_pipes, prepare_pipes, 12156 disable_pipes); 12157 } else if (config->fb_changed) { 12158 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 12159 struct drm_plane *primary = set->crtc->primary; 12160 int vdisplay, hdisplay; 12161 12162 drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay); 12163 ret = primary->funcs->update_plane(primary, set->crtc, set->fb, 12164 0, 0, hdisplay, vdisplay, 12165 set->x << 16, set->y << 16, 12166 hdisplay << 16, vdisplay << 16); 12167 12168 /* 12169 * We need to make sure the primary plane is re-enabled if it 12170 * has previously been turned off. 
12171 */ 12172 if (!intel_crtc->primary_enabled && ret == 0) { 12173 WARN_ON(!intel_crtc->active); 12174 intel_enable_primary_hw_plane(set->crtc->primary, set->crtc); 12175 } 12176 12177 /* 12178 * In the fastboot case this may be our only check of the 12179 * state after boot. It would be better to only do it on 12180 * the first update, but we don't have a nice way of doing that 12181 * (and really, set_config isn't used much for high freq page 12182 * flipping, so increasing its cost here shouldn't be a big 12183 * deal). 12184 */ 12185 if (i915.fastboot && ret == 0) 12186 intel_modeset_check_state(set->crtc->dev); 12187 } 12188 12189 if (ret) { 12190 DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n", 12191 set->crtc->base.id, ret); 12192 fail: 12193 intel_set_config_restore_state(dev, config); 12194 12195 drm_atomic_state_clear(state); 12196 12197 /* 12198 * HACK: if the pipe was on, but we didn't have a framebuffer, 12199 * force the pipe off to avoid oopsing in the modeset code 12200 * due to fb==NULL. This should only happen during boot since 12201 * we don't yet reconstruct the FB from the hardware state. 
12202 */ 12203 if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb) 12204 disable_crtc_nofb(to_intel_crtc(save_set.crtc)); 12205 12206 /* Try to restore the config */ 12207 if (config->mode_changed && 12208 intel_set_mode(save_set.crtc, save_set.mode, 12209 save_set.x, save_set.y, save_set.fb, 12210 state)) 12211 DRM_ERROR("failed to restore config after modeset failure\n"); 12212 } 12213 12214 out_config: 12215 if (state) 12216 drm_atomic_state_free(state); 12217 12218 intel_set_config_free(config); 12219 return ret; 12220 } 12221 12222 static const struct drm_crtc_funcs intel_crtc_funcs = { 12223 .gamma_set = intel_crtc_gamma_set, 12224 .set_config = intel_crtc_set_config, 12225 .destroy = intel_crtc_destroy, 12226 .page_flip = intel_crtc_page_flip, 12227 .atomic_duplicate_state = intel_crtc_duplicate_state, 12228 .atomic_destroy_state = intel_crtc_destroy_state, 12229 }; 12230 12231 static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv, 12232 struct intel_shared_dpll *pll, 12233 struct intel_dpll_hw_state *hw_state) 12234 { 12235 uint32_t val; 12236 12237 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS)) 12238 return false; 12239 12240 val = I915_READ(PCH_DPLL(pll->id)); 12241 hw_state->dpll = val; 12242 hw_state->fp0 = I915_READ(PCH_FP0(pll->id)); 12243 hw_state->fp1 = I915_READ(PCH_FP1(pll->id)); 12244 12245 return val & DPLL_VCO_ENABLE; 12246 } 12247 12248 static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, 12249 struct intel_shared_dpll *pll) 12250 { 12251 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0); 12252 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1); 12253 } 12254 12255 static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 12256 struct intel_shared_dpll *pll) 12257 { 12258 /* PCH refclock must be enabled first */ 12259 ibx_assert_pch_refclk_enabled(dev_priv); 12260 12261 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 12262 12263 /* Wait for the clocks 
to stabilize. */ 12264 POSTING_READ(PCH_DPLL(pll->id)); 12265 udelay(150); 12266 12267 /* The pixel multiplier can only be updated once the 12268 * DPLL is enabled and the clocks are stable. 12269 * 12270 * So write it again. 12271 */ 12272 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll); 12273 POSTING_READ(PCH_DPLL(pll->id)); 12274 udelay(200); 12275 } 12276 12277 static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 12278 struct intel_shared_dpll *pll) 12279 { 12280 struct drm_device *dev = dev_priv->dev; 12281 struct intel_crtc *crtc; 12282 12283 /* Make sure no transcoder isn't still depending on us. */ 12284 for_each_intel_crtc(dev, crtc) { 12285 if (intel_crtc_to_shared_dpll(crtc) == pll) 12286 assert_pch_transcoder_disabled(dev_priv, crtc->pipe); 12287 } 12288 12289 I915_WRITE(PCH_DPLL(pll->id), 0); 12290 POSTING_READ(PCH_DPLL(pll->id)); 12291 udelay(200); 12292 } 12293 12294 static char *ibx_pch_dpll_names[] = { 12295 "PCH DPLL A", 12296 "PCH DPLL B", 12297 }; 12298 12299 static void ibx_pch_dpll_init(struct drm_device *dev) 12300 { 12301 struct drm_i915_private *dev_priv = dev->dev_private; 12302 int i; 12303 12304 dev_priv->num_shared_dpll = 2; 12305 12306 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 12307 dev_priv->shared_dplls[i].id = i; 12308 dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i]; 12309 dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set; 12310 dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable; 12311 dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable; 12312 dev_priv->shared_dplls[i].get_hw_state = 12313 ibx_pch_dpll_get_hw_state; 12314 } 12315 } 12316 12317 static void intel_shared_dpll_init(struct drm_device *dev) 12318 { 12319 struct drm_i915_private *dev_priv = dev->dev_private; 12320 12321 if (HAS_DDI(dev)) 12322 intel_ddi_pll_init(dev); 12323 else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 12324 ibx_pch_dpll_init(dev); 12325 else 12326 dev_priv->num_shared_dpll = 0; 12327 12328 
BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS); 12329 } 12330 12331 /** 12332 * intel_wm_need_update - Check whether watermarks need updating 12333 * @plane: drm plane 12334 * @state: new plane state 12335 * 12336 * Check current plane state versus the new one to determine whether 12337 * watermarks need to be recalculated. 12338 * 12339 * Returns true or false. 12340 */ 12341 bool intel_wm_need_update(struct drm_plane *plane, 12342 struct drm_plane_state *state) 12343 { 12344 /* Update watermarks on tiling changes. */ 12345 if (!plane->state->fb || !state->fb || 12346 plane->state->fb->modifier[0] != state->fb->modifier[0] || 12347 plane->state->rotation != state->rotation) 12348 return true; 12349 12350 return false; 12351 } 12352 12353 /** 12354 * intel_prepare_plane_fb - Prepare fb for usage on plane 12355 * @plane: drm plane to prepare for 12356 * @fb: framebuffer to prepare for presentation 12357 * 12358 * Prepares a framebuffer for usage on a display plane. Generally this 12359 * involves pinning the underlying object and updating the frontbuffer tracking 12360 * bits. Some older platforms need special physical address handling for 12361 * cursor planes. 12362 * 12363 * Returns 0 on success, negative error code on failure. 
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
                       struct drm_framebuffer *fb,
                       const struct drm_plane_state *new_state)
{
        struct drm_device *dev = plane->dev;
        struct intel_plane *intel_plane = to_intel_plane(plane);
        enum i915_pipe pipe = intel_plane->pipe;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);
        struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
        unsigned frontbuffer_bits = 0;
        int ret = 0;

        /* Nothing to pin or track without a backing GEM object. */
        if (!obj)
                return 0;

        /* Pick the frontbuffer-tracking bit matching this plane type/pipe. */
        switch (plane->type) {
        case DRM_PLANE_TYPE_PRIMARY:
                frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(pipe);
                break;
        case DRM_PLANE_TYPE_CURSOR:
                frontbuffer_bits = INTEL_FRONTBUFFER_CURSOR(pipe);
                break;
        case DRM_PLANE_TYPE_OVERLAY:
                frontbuffer_bits = INTEL_FRONTBUFFER_SPRITE(pipe);
                break;
        }

        mutex_lock(&dev->struct_mutex);

        if (plane->type == DRM_PLANE_TYPE_CURSOR &&
            INTEL_INFO(dev)->cursor_needs_physical) {
                /* Older platforms need the cursor at a physical address;
                 * I830 additionally requires 16KiB alignment. */
                int align = IS_I830(dev) ? 16 * 1024 : 256;
                ret = i915_gem_object_attach_phys(obj, align);
                if (ret)
                        DRM_DEBUG_KMS("failed to attach phys object\n");
        } else {
                ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL);
        }

        /* Move the frontbuffer-tracking bits from the old object to the
         * new one only if preparation succeeded. */
        if (ret == 0)
                i915_gem_track_fb(old_obj, obj, frontbuffer_bits);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * intel_cleanup_plane_fb - Cleans up an fb after plane use
 * @plane: drm plane to clean up for
 * @fb: old framebuffer that was on plane
 *
 * Cleans up a framebuffer that has just been removed from a plane.
 */
void
intel_cleanup_plane_fb(struct drm_plane *plane,
                       struct drm_framebuffer *fb,
                       const struct drm_plane_state *old_state)
{
        struct drm_device *dev = plane->dev;
        struct drm_i915_gem_object *obj = intel_fb_obj(fb);

        if (WARN_ON(!obj))
                return;

        /* Physically-attached cursor objects were never pinned in
         * intel_prepare_plane_fb(), so only unpin the other cases. */
        if (plane->type != DRM_PLANE_TYPE_CURSOR ||
            !INTEL_INFO(dev)->cursor_needs_physical) {
                mutex_lock(&dev->struct_mutex);
                intel_unpin_fb_obj(fb, old_state);
                mutex_unlock(&dev->struct_mutex);
        }
}

/*
 * Atomic check hook for the primary plane: validate the update (no
 * scaling allowed) and record in intel_crtc->atomic which follow-up
 * work (flip waits, FBC, watermarks, vblank waits) the commit needs.
 */
static int
intel_check_primary_plane(struct drm_plane *plane,
                          struct intel_plane_state *state)
{
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = state->base.crtc;
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb = state->base.fb;
        struct drm_rect *dest = &state->dst;
        struct drm_rect *src = &state->src;
        const struct drm_rect *clip = &state->clip;
        int ret;

        /* state->base.crtc may be NULL for a disable request. */
        crtc = crtc ? crtc : plane->crtc;
        intel_crtc = to_intel_crtc(crtc);

        ret = drm_plane_helper_check_update(plane, crtc, fb,
                                            src, dest, clip,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            DRM_PLANE_HELPER_NO_SCALING,
                                            false, true, &state->visible);
        if (ret)
                return ret;

        if (intel_crtc->active) {
                intel_crtc->atomic.wait_for_flips = true;

                /*
                 * FBC does not work on some platforms for rotated
                 * planes, so disable it when rotation is not 0 and
                 * update it when rotation is set back to 0.
                 *
                 * FIXME: This is redundant with the fbc update done in
                 * the primary plane enable function except that that
                 * one is done too late. We eventually need to unify
                 * this.
                 */
                if (intel_crtc->primary_enabled &&
                    INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
                    dev_priv->fbc.crtc == intel_crtc &&
                    state->base.rotation != BIT(DRM_ROTATE_0)) {
                        intel_crtc->atomic.disable_fbc = true;
                }

                if (state->visible) {
                        /*
                         * BDW signals flip done immediately if the plane
                         * is disabled, even if the plane enable is already
                         * armed to occur at the next vblank :(
                         */
                        if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
                                intel_crtc->atomic.wait_vblank = true;
                }

                intel_crtc->atomic.fb_bits |=
                        INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);

                intel_crtc->atomic.update_fbc = true;

                if (intel_wm_need_update(plane, &state->base))
                        intel_crtc->atomic.update_wm = true;
        }

        return 0;
}

/*
 * Atomic commit hook for the primary plane: point the plane at the new
 * fb and program (or disable) the hardware plane when the crtc is active.
 */
static void
intel_commit_primary_plane(struct drm_plane *plane,
                           struct intel_plane_state *state)
{
        struct drm_crtc *crtc = state->base.crtc;
        struct drm_framebuffer *fb = state->base.fb;
        struct drm_device *dev = plane->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc;
        struct drm_rect *src = &state->src;

        crtc = crtc ? crtc : plane->crtc;
        intel_crtc = to_intel_crtc(crtc);

        plane->fb = fb;
        /* src coordinates are 16.16 fixed point. */
        crtc->x = src->x1 >> 16;
        crtc->y = src->y1 >> 16;

        if (intel_crtc->active) {
                if (state->visible) {
                        /* FIXME: kill this fastboot hack */
                        intel_update_pipe_size(intel_crtc);

                        intel_crtc->primary_enabled = true;

                        dev_priv->display.update_primary_plane(crtc, plane->fb,
                                        crtc->x, crtc->y);
                } else {
                        /*
                         * If clipping results in a non-visible primary plane,
                         * we'll disable the primary plane.  Note that this is
                         * a bit different than what happens if userspace
                         * explicitly disables the plane by passing fb=0
                         * because plane->fb still gets set and pinned.
                         */
                        intel_disable_primary_hw_plane(plane, crtc);
                }
        }
}

/*
 * Pre-commit crtc hook: retire frontbuffer tracking for planes being
 * disabled, perform the deferred work queued by the check hooks, and
 * start vblank evasion around the register writes.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_plane *intel_plane;
        struct drm_plane *p;
        unsigned fb_bits = 0;

        /* Track fb's for any planes being disabled */
        list_for_each_entry(p, &dev->mode_config.plane_list, head) {
                intel_plane = to_intel_plane(p);

                if (intel_crtc->atomic.disabled_planes &
                    (1 << drm_plane_index(p))) {
                        switch (p->type) {
                        case DRM_PLANE_TYPE_PRIMARY:
                                fb_bits = INTEL_FRONTBUFFER_PRIMARY(intel_plane->pipe);
                                break;
                        case DRM_PLANE_TYPE_CURSOR:
                                fb_bits = INTEL_FRONTBUFFER_CURSOR(intel_plane->pipe);
                                break;
                        case DRM_PLANE_TYPE_OVERLAY:
                                fb_bits = INTEL_FRONTBUFFER_SPRITE(intel_plane->pipe);
                                break;
                        }

                        mutex_lock(&dev->struct_mutex);
                        i915_gem_track_fb(intel_fb_obj(p->fb), NULL, fb_bits);
                        mutex_unlock(&dev->struct_mutex);
                }
        }

        if (intel_crtc->atomic.wait_for_flips)
                intel_crtc_wait_for_pending_flips(crtc);

        if (intel_crtc->atomic.disable_fbc)
                intel_fbc_disable(dev);

        if (intel_crtc->atomic.pre_disable_primary)
                intel_pre_disable_primary(crtc);

        if (intel_crtc->atomic.update_wm)
                intel_update_watermarks(crtc);

        intel_runtime_pm_get(dev_priv);

        /* Perform vblank evasion around commit operation */
        if (intel_crtc->active)
                intel_crtc->atomic.evade =
                        intel_pipe_update_start(intel_crtc,
                                        &intel_crtc->atomic.start_vbl_count);
}
12600 static void intel_finish_crtc_commit(struct drm_crtc *crtc) 12601 { 12602 struct drm_device *dev = crtc->dev; 12603 struct drm_i915_private *dev_priv = dev->dev_private; 12604 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12605 struct drm_plane *p; 12606 12607 if (intel_crtc->atomic.evade) 12608 intel_pipe_update_end(intel_crtc, 12609 intel_crtc->atomic.start_vbl_count); 12610 12611 intel_runtime_pm_put(dev_priv); 12612 12613 if (intel_crtc->atomic.wait_vblank) 12614 intel_wait_for_vblank(dev, intel_crtc->pipe); 12615 12616 intel_frontbuffer_flip(dev, intel_crtc->atomic.fb_bits); 12617 12618 if (intel_crtc->atomic.update_fbc) { 12619 mutex_lock(&dev->struct_mutex); 12620 intel_fbc_update(dev); 12621 mutex_unlock(&dev->struct_mutex); 12622 } 12623 12624 if (intel_crtc->atomic.post_enable_primary) 12625 intel_post_enable_primary(crtc); 12626 12627 drm_for_each_legacy_plane(p, &dev->mode_config.plane_list) 12628 if (intel_crtc->atomic.update_sprite_watermarks & drm_plane_index(p)) 12629 intel_update_sprite_watermarks(p, crtc, 0, 0, 0, 12630 false, false); 12631 12632 memset(&intel_crtc->atomic, 0, sizeof(intel_crtc->atomic)); 12633 } 12634 12635 /** 12636 * intel_plane_destroy - destroy a plane 12637 * @plane: plane to destroy 12638 * 12639 * Common destruction function for all types of planes (primary, cursor, 12640 * sprite). 
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	struct intel_plane *intel_plane = to_intel_plane(plane);
	drm_plane_cleanup(plane);
	kfree(intel_plane);
}

/* Plane vfuncs shared by all i915 plane types (primary and cursor here). */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_plane_helper_update,
	.disable_plane = drm_plane_helper_disable,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};

/*
 * Allocate and initialize the primary plane for @pipe, including its
 * initial plane state, the supported format list for this gen, and the
 * rotation property on gen4+. Returns NULL on allocation failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary;
	struct intel_plane_state *state;
	const uint32_t *intel_primary_formats;
	int num_formats;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (primary == NULL)
		return NULL;

	state = intel_create_plane_state(&primary->base);
	if (!state) {
		kfree(primary);
		return NULL;
	}
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->check_plane = intel_check_primary_plane;
	primary->commit_plane = intel_commit_primary_plane;
	/* Mirrors the pipe/plane swap done in intel_crtc_init(): on gen2/3
	 * only plane A can do FBC, so plane A feeds pipe B. */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	if (INTEL_INFO(dev)->gen <= 3) {
		intel_primary_formats = intel_primary_formats_gen2;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen2);
	} else {
		intel_primary_formats = intel_primary_formats_gen4;
		num_formats = ARRAY_SIZE(intel_primary_formats_gen4);
	}

	drm_universal_plane_init(dev, &primary->base, 0,
				 &intel_plane_funcs,
				 intel_primary_formats, num_formats,
				 DRM_PLANE_TYPE_PRIMARY);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* The rotation property is shared device-wide; create it
		 * lazily on first use. */
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&primary->base.base,
					dev->mode_config.rotation_property,
					state->base.rotation);
	}

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;
}

/*
 * Validate a cursor plane update: clipping, supported cursor sizes,
 * backing object size and (un)tiled-ness. Also records frontbuffer and
 * watermark work for the commit phase.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_rect *dest = &state->dst;
	struct drm_rect *src = &state->src;
	const struct drm_rect *clip = &state->clip;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc;
	unsigned stride;
	int ret;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_plane_helper_check_update(plane, crtc, fb,
					    src, dest, clip,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;


	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		goto finish;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Cursor rows are a power-of-two number of pixels at 4 bytes each
	 * (cursors are ARGB8888 only, see intel_cursor_formats). */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	/* Deliberately falls through to "finish" so the frontbuffer bits
	 * below are still updated; ret carries -EINVAL out. */
	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		ret = -EINVAL;
	}

finish:
	if (intel_crtc->active) {
		/* A width change alters the cursor watermark contribution. */
		if (plane->state->crtc_w != state->base.crtc_w)
			intel_crtc->atomic.update_wm = true;

		intel_crtc->atomic.fb_bits |=
			INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe);
	}

	return ret;
}

/*
 * Write the new cursor position/image to the hardware. The cursor
 * address is only recomputed when the backing object changes.
 */
static void
intel_commit_cursor_plane(struct drm_plane *plane,
			  struct intel_plane_state *state)
{
	struct drm_crtc *crtc = state->base.crtc;
	struct drm_device *dev = plane->dev;
	struct intel_crtc *intel_crtc;
	struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
	uint32_t addr;

	crtc = crtc ? crtc : plane->crtc;
	intel_crtc = to_intel_crtc(crtc);

	plane->fb = state->base.fb;
	crtc->cursor_x = state->base.crtc_x;
	crtc->cursor_y = state->base.crtc_y;

	/* Same object as before: only the position needs updating. */
	if (intel_crtc->cursor_bo == obj)
		goto update;

	if (!obj)
		addr = 0;
	else if (!INTEL_INFO(dev)->cursor_needs_physical)
		addr = i915_gem_obj_ggtt_offset(obj);
	else
		addr = obj->phys_handle->busaddr;

	intel_crtc->cursor_addr = addr;
	intel_crtc->cursor_bo = obj;
update:

	if (intel_crtc->active)
		intel_crtc_update_cursor(crtc, state->visible);
}

/*
 * Allocate and initialize the cursor plane for @pipe, mirroring
 * intel_primary_plane_create(). Returns NULL on allocation failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor;
	struct intel_plane_state *state;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (cursor == NULL)
		return NULL;

	state = intel_create_plane_state(&cursor->base);
	if (!state) {
		kfree(cursor);
		return NULL;
	}
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->check_plane = intel_check_cursor_plane;
	cursor->commit_plane = intel_commit_cursor_plane;

	drm_universal_plane_init(dev, &cursor->base, 0,
				 &intel_plane_funcs,
				 intel_cursor_formats,
				 ARRAY_SIZE(intel_cursor_formats),
				 DRM_PLANE_TYPE_CURSOR);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* Shared device-wide property; created lazily on first use. */
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
					dev->mode_config.rotation_property,
					state->base.rotation);
	}

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;
}

/*
 * Allocate and register the CRTC for @pipe together with its primary and
 * cursor planes, initialize the gamma LUT to identity, and record the
 * pipe/plane -> crtc mappings. Frees everything on failure (no return
 * value; a missing CRTC is detected later by the caller's setup).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int i, ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc_set_state(intel_crtc, crtc_state);
	crtc_state->base.crtc = &intel_crtc->base;

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs);
	if (ret)
		goto fail;

	/* Identity gamma ramp until userspace programs one. */
	drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
	for (i = 0; i < 256; i++) {
		intel_crtc->lut_r[i] = i;
		intel_crtc->lut_g[i] = i;
		intel_crtc->lut_b[i] = i;
	}

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 == "unknown", forces the first cursor update to program hw. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}

/*
 * Resolve the pipe a connector is currently driven by, or INVALID_PIPE
 * if it has no active encoder/crtc. Requires the connection mutex.
 */
enum i915_pipe intel_get_pipe_from_connector(struct intel_connector *connector)
{
	struct drm_encoder *encoder = connector->base.encoder;
	struct drm_device *dev = connector->base.dev;

	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));

	if (!encoder || WARN_ON(!encoder->crtc))
		return INVALID_PIPE;

	return to_intel_crtc(encoder->crtc)->pipe;
}

/* ioctl handler: translate a CRTC object id into the hw pipe it drives. */
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
				struct drm_file *file)
{
	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
	struct drm_crtc *drmmode_crtc;
	struct intel_crtc *crtc;

	drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);

	if (!drmmode_crtc) {
		DRM_ERROR("no such CRTC id\n");
		return -ENOENT;
	}

	crtc = to_intel_crtc(drmmode_crtc);
	pipe_from_crtc_id->pipe = crtc->pipe;

	return 0;
}

/*
 * Build the possible_clones bitmask for @encoder: one bit per encoder
 * (in enumeration order) that can be cloned with it on the same crtc.
 */
static int intel_encoder_clones(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_encoder *source_encoder;
	int index_mask = 0;
	int entry = 0;

	for_each_intel_encoder(dev, source_encoder) {
		if (encoders_cloneable(encoder, source_encoder))
			index_mask |= (1 << entry);

		entry++;
	}

	return index_mask;
}

/* Whether eDP on port A is present (mobile parts only; gen5 has an
 * additional fuse strap that can disable it). */
static bool has_edp_a(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_MOBILE(dev))
		return false;

	if ((I915_READ(DP_A) & DP_DETECTED) == 0)
		return false;

	if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
		return false;

	return true;
}

/* Whether this platform has an analog VGA (CRT) output at all. */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
		return false;

	return true;
}

/*
 * Probe and register every display output (encoder) present on this
 * platform, then compute the possible_crtcs/possible_clones masks.
 * The detection strategy is per-platform: DDI straps on HSW+, PCH
 * detect registers on ILK+, latched DDC/VBT state on VLV/CHV, and
 * SDVO/DVO probing on older gens.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found ||
		    (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, true);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
					PORT_B);
		if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);

		if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
					PORT_C);
		if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
						PORT_D);
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
				intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, true);
			if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, false);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (SUPPORTS_INTEGRATED_HDMI(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (SUPPORTS_INTEGRATED_DP(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (SUPPORTS_INTEGRATED_DP(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}

/* drm_framebuffer_funcs.destroy: drop the GEM reference taken at fb init. */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Balance the increment in intel_framebuffer_init(). */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}

/* drm_framebuffer_funcs.create_handle: expose the backing GEM object. */
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
						struct drm_file *file,
						unsigned int *handle)
{
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	return drm_gem_handle_create(file, &obj->base, handle);
}

static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
};

/*
 * Maximum framebuffer pitch in bytes for the given gen, tiling modifier
 * and pixel format.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		/* "The stride in bytes must not exceed the of the size of 8K
		 * pixels and 32K bytes."
		 */
		return min(8192*drm_format_plane_cpp(pixel_format, 0), 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}

/*
 * Validate a userspace-supplied framebuffer description against the
 * hardware limits (modifier/tiling consistency, stride alignment and
 * pitch limit, supported pixel formats, object size) and initialize
 * @intel_fb on success. Returns 0 or a negative errno.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's
		 * tiling mode. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	/* NOTE(review): modifier[0] is 64-bit; "0x%lx" assumes an LP64
	 * target — confirm against this port's DRM_DEBUG conventions. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%lx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is accepted on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%lx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev, mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout reuses the object's fence stride, so they
	 * must agree. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
	case DRM_FORMAT_ARGB1555:
		/* gen2/3 only (see intel_primary_formats_gen2). */
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_ARGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_ABGR2101010:
		/* gen4+ only (see intel_primary_formats_gen4). */
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;
	/* Dropped again in intel_user_framebuffer_destroy(). */
	intel_fb->obj->framebuffer_references++;

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	return 0;
}

/* drm_mode_config_funcs.fb_create: resolve the GEM handle and build an fb. */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct drm_i915_gem_object *obj;

	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
						mode_cmd->handles[0]));
	/* to_intel_bo() of a NULL lookup leaves &obj->base == NULL. */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	return intel_framebuffer_create(dev, mode_cmd, obj);
}

#ifndef CONFIG_DRM_I915_FBDEV
/* No-op stub when fbdev emulation is compiled out. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif

static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
};

/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev) || IS_G4X(dev))
		dev_priv->display.find_dpll = g4x_find_best_dpll;
	else if (IS_CHERRYVIEW(dev))
		dev_priv->display.find_dpll = chv_find_best_dpll;
	else if (IS_VALLEYVIEW(dev))
		dev_priv->display.find_dpll = vlv_find_best_dpll;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.find_dpll = pnv_find_best_dpll;
	else
		dev_priv->display.find_dpll = i9xx_find_best_dpll;

	/* Per-platform modeset vfuncs, most specific platform first. */
	if (INTEL_INFO(dev)->gen >= 9)
	{
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			skylake_update_primary_plane;
	} else if (HAS_DDI(dev)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
		dev_priv->display.off = ironlake_crtc_off;
		dev_priv->display.update_primary_plane =
			ironlake_update_primary_plane;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	} else {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
		dev_priv->display.off = i9xx_crtc_off;
		dev_priv->display.update_primary_plane =
			i9xx_update_primary_plane;
	}

	/* Returns the core display clock speed */
	if (IS_VALLEYVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_I915G(dev))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev) || IS_845G(dev))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_PINEVIEW(dev))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_I915GM(dev))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev))
		dev_priv->display.get_display_clock_speed =
			i855_get_display_clock_speed;
	else /* 852, 830 */
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;

	/* FDI link training method (PCH platforms only). */
	if (IS_GEN5(dev)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.modeset_global_resources =
			valleyview_modeset_global_resources;
	}

	/* Ring-based page-flip implementation per gen. */
	switch (INTEL_INFO(dev)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}

	intel_panel_init_backlight_funcs(dev);

	/* DragonFly lockmgr lock standing in for Linux's pps_mutex. */
	lockinit(&dev_priv->pps_mutex, "i915pm", 0, LK_CANRECURSE);
}

/*
 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
 * resume, or other times. This quirk makes sure that's the case for
 * affected systems.
13548 */ 13549 static void quirk_pipea_force(struct drm_device *dev) 13550 { 13551 struct drm_i915_private *dev_priv = dev->dev_private; 13552 13553 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 13554 DRM_INFO("applying pipe a force quirk\n"); 13555 } 13556 13557 static void quirk_pipeb_force(struct drm_device *dev) 13558 { 13559 struct drm_i915_private *dev_priv = dev->dev_private; 13560 13561 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 13562 DRM_INFO("applying pipe b force quirk\n"); 13563 } 13564 13565 /* 13566 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason 13567 */ 13568 static void quirk_ssc_force_disable(struct drm_device *dev) 13569 { 13570 struct drm_i915_private *dev_priv = dev->dev_private; 13571 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 13572 DRM_INFO("applying lvds SSC disable quirk\n"); 13573 } 13574 13575 /* 13576 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight 13577 * brightness value 13578 */ 13579 static void quirk_invert_brightness(struct drm_device *dev) 13580 { 13581 struct drm_i915_private *dev_priv = dev->dev_private; 13582 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 13583 DRM_INFO("applying inverted panel brightness quirk\n"); 13584 } 13585 13586 /* Some VBT's incorrectly indicate no backlight is present */ 13587 static void quirk_backlight_present(struct drm_device *dev) 13588 { 13589 struct drm_i915_private *dev_priv = dev->dev_private; 13590 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 13591 DRM_INFO("applying backlight present quirk\n"); 13592 } 13593 13594 struct intel_quirk { 13595 int device; 13596 int subsystem_vendor; 13597 int subsystem_device; 13598 void (*hook)(struct drm_device *dev); 13599 }; 13600 13601 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */ 13602 struct intel_dmi_quirk { 13603 void (*hook)(struct drm_device *dev); 13604 const struct dmi_system_id (*dmi_id_list)[]; 13605 }; 13606 13607 static int intel_dmi_reverse_brightness(const struct 
dmi_system_id *id) 13608 { 13609 DRM_INFO("Backlight polarity reversed on %s\n", id->ident); 13610 return 1; 13611 } 13612 13613 static const struct intel_dmi_quirk intel_dmi_quirks[] = { 13614 { 13615 .dmi_id_list = &(const struct dmi_system_id[]) { 13616 { 13617 .callback = intel_dmi_reverse_brightness, 13618 .ident = "NCR Corporation", 13619 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"), 13620 DMI_MATCH(DMI_PRODUCT_NAME, ""), 13621 }, 13622 }, 13623 { } /* terminating entry */ 13624 }, 13625 .hook = quirk_invert_brightness, 13626 }, 13627 }; 13628 13629 static struct intel_quirk intel_quirks[] = { 13630 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ 13631 { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, 13632 13633 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ 13634 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, 13635 13636 /* 830 needs to leave pipe A & dpll A up */ 13637 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, 13638 13639 /* 830 needs to leave pipe B & dpll B up */ 13640 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force }, 13641 13642 /* Lenovo U160 cannot use SSC on LVDS */ 13643 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, 13644 13645 /* Sony Vaio Y cannot use SSC on LVDS */ 13646 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, 13647 13648 /* Acer Aspire 5734Z must invert backlight brightness */ 13649 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, 13650 13651 /* Acer/eMachines G725 */ 13652 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, 13653 13654 /* Acer/eMachines e725 */ 13655 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, 13656 13657 /* Acer/Packard Bell NCL20 */ 13658 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, 13659 13660 /* Acer Aspire 4736Z */ 13661 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, 13662 13663 /* Acer Aspire 5336 */ 13664 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 13665 13666 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have 
backlights */ 13667 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, 13668 13669 /* Acer C720 Chromebook (Core i3 4005U) */ 13670 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 13671 13672 /* Apple Macbook 2,1 (Core 2 T7400) */ 13673 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present }, 13674 13675 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 13676 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 13677 13678 /* HP Chromebook 14 (Celeron 2955U) */ 13679 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, 13680 13681 /* Dell Chromebook 11 */ 13682 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, 13683 }; 13684 13685 static void intel_init_quirks(struct drm_device *dev) 13686 { 13687 struct device *d = dev->dev; 13688 int i; 13689 13690 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { 13691 struct intel_quirk *q = &intel_quirks[i]; 13692 13693 if (pci_get_device(d) == q->device && 13694 (pci_get_subvendor(d) == q->subsystem_vendor || 13695 q->subsystem_vendor == PCI_ANY_ID) && 13696 (pci_get_subdevice(d) == q->subsystem_device || 13697 q->subsystem_device == PCI_ANY_ID)) 13698 q->hook(dev); 13699 } 13700 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) { 13701 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0) 13702 intel_dmi_quirks[i].hook(dev); 13703 } 13704 } 13705 13706 /* Disable the VGA plane that we never use */ 13707 static void i915_disable_vga(struct drm_device *dev) 13708 { 13709 struct drm_i915_private *dev_priv = dev->dev_private; 13710 u8 sr1; 13711 u32 vga_reg = i915_vgacntrl_reg(dev); 13712 13713 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ 13714 #if 0 13715 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 13716 #endif 13717 outb(VGA_SR_INDEX, SR01); 13718 sr1 = inb(VGA_SR_DATA); 13719 outb(VGA_SR_DATA, sr1 | 1 << 5); 13720 #if 0 13721 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 13722 #endif 13723 udelay(300); 13724 13725 /* 13726 * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming 13727 * 
from S3 without preserving (some of?) the other bits. 13728 */ 13729 I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); 13730 POSTING_READ(vga_reg); 13731 } 13732 13733 void intel_modeset_init_hw(struct drm_device *dev) 13734 { 13735 intel_prepare_ddi(dev); 13736 13737 if (IS_VALLEYVIEW(dev)) 13738 vlv_update_cdclk(dev); 13739 13740 intel_init_clock_gating(dev); 13741 13742 intel_enable_gt_powersave(dev); 13743 } 13744 13745 void intel_modeset_init(struct drm_device *dev) 13746 { 13747 struct drm_i915_private *dev_priv = dev->dev_private; 13748 int sprite, ret; 13749 enum i915_pipe pipe; 13750 struct intel_crtc *crtc; 13751 13752 drm_mode_config_init(dev); 13753 13754 dev->mode_config.min_width = 0; 13755 dev->mode_config.min_height = 0; 13756 13757 dev->mode_config.preferred_depth = 24; 13758 dev->mode_config.prefer_shadow = 1; 13759 13760 dev->mode_config.allow_fb_modifiers = true; 13761 13762 dev->mode_config.funcs = &intel_mode_funcs; 13763 13764 intel_init_quirks(dev); 13765 13766 intel_init_pm(dev); 13767 13768 if (INTEL_INFO(dev)->num_pipes == 0) 13769 return; 13770 13771 intel_init_display(dev); 13772 intel_init_audio(dev); 13773 13774 if (IS_GEN2(dev)) { 13775 dev->mode_config.max_width = 2048; 13776 dev->mode_config.max_height = 2048; 13777 } else if (IS_GEN3(dev)) { 13778 dev->mode_config.max_width = 4096; 13779 dev->mode_config.max_height = 4096; 13780 } else { 13781 dev->mode_config.max_width = 8192; 13782 dev->mode_config.max_height = 8192; 13783 } 13784 13785 if (IS_845G(dev) || IS_I865G(dev)) { 13786 dev->mode_config.cursor_width = IS_845G(dev) ? 
64 : 512; 13787 dev->mode_config.cursor_height = 1023; 13788 } else if (IS_GEN2(dev)) { 13789 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH; 13790 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT; 13791 } else { 13792 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH; 13793 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT; 13794 } 13795 13796 dev->mode_config.fb_base = dev_priv->gtt.mappable_base; 13797 13798 DRM_DEBUG_KMS("%d display pipe%s available.\n", 13799 INTEL_INFO(dev)->num_pipes, 13800 INTEL_INFO(dev)->num_pipes > 1 ? "s" : ""); 13801 13802 for_each_pipe(dev_priv, pipe) { 13803 intel_crtc_init(dev, pipe); 13804 for_each_sprite(dev_priv, pipe, sprite) { 13805 ret = intel_plane_init(dev, pipe, sprite); 13806 if (ret) 13807 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", 13808 pipe_name(pipe), sprite_name(pipe, sprite), ret); 13809 } 13810 } 13811 13812 intel_init_dpio(dev); 13813 13814 intel_shared_dpll_init(dev); 13815 13816 /* save the BIOS value before clobbering it */ 13817 dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); 13818 /* Just disable it once at startup */ 13819 i915_disable_vga(dev); 13820 intel_setup_outputs(dev); 13821 13822 /* Just in case the BIOS is doing something questionable. */ 13823 intel_fbc_disable(dev); 13824 13825 drm_modeset_lock_all(dev); 13826 intel_modeset_setup_hw_state(dev, false); 13827 drm_modeset_unlock_all(dev); 13828 13829 for_each_intel_crtc(dev, crtc) { 13830 if (!crtc->active) 13831 continue; 13832 13833 /* 13834 * Note that reserving the BIOS fb up front prevents us 13835 * from stuffing other stolen allocations like the ring 13836 * on top. This prevents some ugliness at boot time, and 13837 * can even allow for smooth boot transitions if the BIOS 13838 * fb is large enough for the active pipe configuration. 
13839 */ 13840 if (dev_priv->display.get_initial_plane_config) { 13841 dev_priv->display.get_initial_plane_config(crtc, 13842 &crtc->plane_config); 13843 /* 13844 * If the fb is shared between multiple heads, we'll 13845 * just get the first one. 13846 */ 13847 intel_find_initial_plane_obj(crtc, &crtc->plane_config); 13848 } 13849 } 13850 } 13851 13852 static void intel_enable_pipe_a(struct drm_device *dev) 13853 { 13854 struct intel_connector *connector; 13855 struct drm_connector *crt = NULL; 13856 struct intel_load_detect_pipe load_detect_temp; 13857 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx; 13858 13859 /* We can't just switch on the pipe A, we need to set things up with a 13860 * proper mode and output configuration. As a gross hack, enable pipe A 13861 * by enabling the load detect pipe once. */ 13862 for_each_intel_connector(dev, connector) { 13863 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { 13864 crt = &connector->base; 13865 break; 13866 } 13867 } 13868 13869 if (!crt) 13870 return; 13871 13872 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) 13873 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); 13874 } 13875 13876 static bool 13877 intel_check_plane_mapping(struct intel_crtc *crtc) 13878 { 13879 struct drm_device *dev = crtc->base.dev; 13880 struct drm_i915_private *dev_priv = dev->dev_private; 13881 u32 reg, val; 13882 13883 if (INTEL_INFO(dev)->num_pipes == 1) 13884 return true; 13885 13886 reg = DSPCNTR(!crtc->plane); 13887 val = I915_READ(reg); 13888 13889 if ((val & DISPLAY_PLANE_ENABLE) && 13890 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe)) 13891 return false; 13892 13893 return true; 13894 } 13895 13896 static void intel_sanitize_crtc(struct intel_crtc *crtc) 13897 { 13898 struct drm_device *dev = crtc->base.dev; 13899 struct drm_i915_private *dev_priv = dev->dev_private; 13900 u32 reg; 13901 13902 /* Clear any frame start delays used for debugging left by the BIOS */ 13903 reg 
= PIPECONF(crtc->config->cpu_transcoder); 13904 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK); 13905 13906 /* restore vblank interrupts to correct state */ 13907 drm_crtc_vblank_reset(&crtc->base); 13908 if (crtc->active) { 13909 update_scanline_offset(crtc); 13910 drm_crtc_vblank_on(&crtc->base); 13911 } 13912 13913 /* We need to sanitize the plane -> pipe mapping first because this will 13914 * disable the crtc (and hence change the state) if it is wrong. Note 13915 * that gen4+ has a fixed plane -> pipe mapping. */ 13916 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 13917 struct intel_connector *connector; 13918 bool plane; 13919 13920 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 13921 crtc->base.base.id); 13922 13923 /* Pipe has the wrong plane attached and the plane is active. 13924 * Temporarily change the plane mapping and disable everything 13925 * ... */ 13926 plane = crtc->plane; 13927 crtc->plane = !plane; 13928 crtc->primary_enabled = true; 13929 dev_priv->display.crtc_disable(&crtc->base); 13930 crtc->plane = plane; 13931 13932 /* ... and break all links. */ 13933 for_each_intel_connector(dev, connector) { 13934 if (connector->encoder->base.crtc != &crtc->base) 13935 continue; 13936 13937 connector->base.dpms = DRM_MODE_DPMS_OFF; 13938 connector->base.encoder = NULL; 13939 } 13940 /* multiple connectors may have the same encoder: 13941 * handle them and break crtc link separately */ 13942 for_each_intel_connector(dev, connector) 13943 if (connector->encoder->base.crtc == &crtc->base) { 13944 connector->encoder->base.crtc = NULL; 13945 connector->encoder->connectors_active = false; 13946 } 13947 13948 WARN_ON(crtc->active); 13949 crtc->base.state->enable = false; 13950 crtc->base.enabled = false; 13951 } 13952 13953 if (dev_priv->quirks & QUIRK_PIPEA_FORCE && 13954 crtc->pipe == PIPE_A && !crtc->active) { 13955 /* BIOS forgot to enable pipe A, this mostly happens after 13956 * resume. 
Force-enable the pipe to fix this, the update_dpms 13957 * call below we restore the pipe to the right state, but leave 13958 * the required bits on. */ 13959 intel_enable_pipe_a(dev); 13960 } 13961 13962 /* Adjust the state of the output pipe according to whether we 13963 * have active connectors/encoders. */ 13964 intel_crtc_update_dpms(&crtc->base); 13965 13966 if (crtc->active != crtc->base.state->enable) { 13967 struct intel_encoder *encoder; 13968 13969 /* This can happen either due to bugs in the get_hw_state 13970 * functions or because the pipe is force-enabled due to the 13971 * pipe A quirk. */ 13972 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was %s, now %s\n", 13973 crtc->base.base.id, 13974 crtc->base.state->enable ? "enabled" : "disabled", 13975 crtc->active ? "enabled" : "disabled"); 13976 13977 crtc->base.state->enable = crtc->active; 13978 crtc->base.enabled = crtc->active; 13979 13980 /* Because we only establish the connector -> encoder -> 13981 * crtc links if something is active, this means the 13982 * crtc is now deactivated. Break the links. connector 13983 * -> encoder links are only establish when things are 13984 * actually up, hence no need to break them. */ 13985 WARN_ON(crtc->active); 13986 13987 for_each_encoder_on_crtc(dev, &crtc->base, encoder) { 13988 WARN_ON(encoder->connectors_active); 13989 encoder->base.crtc = NULL; 13990 } 13991 } 13992 13993 if (crtc->active || HAS_GMCH_DISPLAY(dev)) { 13994 /* 13995 * We start out with underrun reporting disabled to avoid races. 13996 * For correct bookkeeping mark this on active crtcs. 13997 * 13998 * Also on gmch platforms we dont have any hardware bits to 13999 * disable the underrun reporting. Which means we need to start 14000 * out with underrun reporting disabled also on inactive pipes, 14001 * since otherwise we'll complain about the garbage we read when 14002 * e.g. coming up after runtime pm. 
14003 * 14004 * No protection against concurrent access is required - at 14005 * worst a fifo underrun happens which also sets this to false. 14006 */ 14007 crtc->cpu_fifo_underrun_disabled = true; 14008 crtc->pch_fifo_underrun_disabled = true; 14009 } 14010 } 14011 14012 static void intel_sanitize_encoder(struct intel_encoder *encoder) 14013 { 14014 struct intel_connector *connector; 14015 struct drm_device *dev = encoder->base.dev; 14016 14017 /* We need to check both for a crtc link (meaning that the 14018 * encoder is active and trying to read from a pipe) and the 14019 * pipe itself being active. */ 14020 bool has_active_crtc = encoder->base.crtc && 14021 to_intel_crtc(encoder->base.crtc)->active; 14022 14023 if (encoder->connectors_active && !has_active_crtc) { 14024 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n", 14025 encoder->base.base.id, 14026 encoder->base.name); 14027 14028 /* Connector is active, but has no active pipe. This is 14029 * fallout from our resume register restoring. Disable 14030 * the encoder manually again. */ 14031 if (encoder->base.crtc) { 14032 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", 14033 encoder->base.base.id, 14034 encoder->base.name); 14035 encoder->disable(encoder); 14036 if (encoder->post_disable) 14037 encoder->post_disable(encoder); 14038 } 14039 encoder->base.crtc = NULL; 14040 encoder->connectors_active = false; 14041 14042 /* Inconsistent output/port/pipe state happens presumably due to 14043 * a bug in one of the get_hw_state functions. Or someplace else 14044 * in our code, like the register restore mess on resume. Clamp 14045 * things to off as a safer default. */ 14046 for_each_intel_connector(dev, connector) { 14047 if (connector->encoder != encoder) 14048 continue; 14049 connector->base.dpms = DRM_MODE_DPMS_OFF; 14050 connector->base.encoder = NULL; 14051 } 14052 } 14053 /* Enabled encoders without active connectors will be fixed in 14054 * the crtc fixup. 
*/ 14055 } 14056 14057 void i915_redisable_vga_power_on(struct drm_device *dev) 14058 { 14059 struct drm_i915_private *dev_priv = dev->dev_private; 14060 u32 vga_reg = i915_vgacntrl_reg(dev); 14061 14062 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 14063 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 14064 i915_disable_vga(dev); 14065 } 14066 } 14067 14068 void i915_redisable_vga(struct drm_device *dev) 14069 { 14070 struct drm_i915_private *dev_priv = dev->dev_private; 14071 14072 /* This function can be called both from intel_modeset_setup_hw_state or 14073 * at a very early point in our resume sequence, where the power well 14074 * structures are not yet restored. Since this function is at a very 14075 * paranoid "someone might have enabled VGA while we were not looking" 14076 * level, just check if the power well is enabled instead of trying to 14077 * follow the "don't touch the power well if we don't need it" policy 14078 * the rest of the driver uses. */ 14079 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA)) 14080 return; 14081 14082 i915_redisable_vga_power_on(dev); 14083 } 14084 14085 static bool primary_get_hw_state(struct intel_crtc *crtc) 14086 { 14087 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 14088 14089 if (!crtc->active) 14090 return false; 14091 14092 return I915_READ(DSPCNTR(crtc->plane)) & DISPLAY_PLANE_ENABLE; 14093 } 14094 14095 static void intel_modeset_readout_hw_state(struct drm_device *dev) 14096 { 14097 struct drm_i915_private *dev_priv = dev->dev_private; 14098 enum i915_pipe pipe; 14099 struct intel_crtc *crtc; 14100 struct intel_encoder *encoder; 14101 struct intel_connector *connector; 14102 int i; 14103 14104 for_each_intel_crtc(dev, crtc) { 14105 memset(crtc->config, 0, sizeof(*crtc->config)); 14106 14107 crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE; 14108 14109 crtc->active = dev_priv->display.get_pipe_config(crtc, 14110 crtc->config); 14111 14112 
crtc->base.state->enable = crtc->active; 14113 crtc->base.enabled = crtc->active; 14114 crtc->primary_enabled = primary_get_hw_state(crtc); 14115 14116 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 14117 crtc->base.base.id, 14118 crtc->active ? "enabled" : "disabled"); 14119 } 14120 14121 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 14122 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 14123 14124 pll->on = pll->get_hw_state(dev_priv, pll, 14125 &pll->config.hw_state); 14126 pll->active = 0; 14127 pll->config.crtc_mask = 0; 14128 for_each_intel_crtc(dev, crtc) { 14129 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) { 14130 pll->active++; 14131 pll->config.crtc_mask |= 1 << crtc->pipe; 14132 } 14133 } 14134 14135 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n", 14136 pll->name, pll->config.crtc_mask, pll->on); 14137 14138 if (pll->config.crtc_mask) 14139 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 14140 } 14141 14142 for_each_intel_encoder(dev, encoder) { 14143 pipe = 0; 14144 14145 if (encoder->get_hw_state(encoder, &pipe)) { 14146 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 14147 encoder->base.crtc = &crtc->base; 14148 encoder->get_config(encoder, crtc->config); 14149 } else { 14150 encoder->base.crtc = NULL; 14151 } 14152 14153 encoder->connectors_active = false; 14154 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n", 14155 encoder->base.base.id, 14156 encoder->base.name, 14157 encoder->base.crtc ? 
"enabled" : "disabled", 14158 pipe_name(pipe)); 14159 } 14160 14161 for_each_intel_connector(dev, connector) { 14162 if (connector->get_hw_state(connector)) { 14163 connector->base.dpms = DRM_MODE_DPMS_ON; 14164 connector->encoder->connectors_active = true; 14165 connector->base.encoder = &connector->encoder->base; 14166 } else { 14167 connector->base.dpms = DRM_MODE_DPMS_OFF; 14168 connector->base.encoder = NULL; 14169 } 14170 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n", 14171 connector->base.base.id, 14172 connector->base.name, 14173 connector->base.encoder ? "enabled" : "disabled"); 14174 } 14175 } 14176 14177 /* Scan out the current hw modeset state, sanitizes it and maps it into the drm 14178 * and i915 state tracking structures. */ 14179 void intel_modeset_setup_hw_state(struct drm_device *dev, 14180 bool force_restore) 14181 { 14182 struct drm_i915_private *dev_priv = dev->dev_private; 14183 enum i915_pipe pipe; 14184 struct intel_crtc *crtc; 14185 struct intel_encoder *encoder; 14186 int i; 14187 14188 intel_modeset_readout_hw_state(dev); 14189 14190 /* 14191 * Now that we have the config, copy it to each CRTC struct 14192 * Note that this could go away if we move to using crtc_config 14193 * checking everywhere. 14194 */ 14195 for_each_intel_crtc(dev, crtc) { 14196 if (crtc->active && i915.fastboot) { 14197 intel_mode_from_pipe_config(&crtc->base.mode, 14198 crtc->config); 14199 DRM_DEBUG_KMS("[CRTC:%d] found active mode: ", 14200 crtc->base.base.id); 14201 drm_mode_debug_printmodeline(&crtc->base.mode); 14202 } 14203 } 14204 14205 /* HW state is read out, now we need to sanitize this mess. 
*/ 14206 for_each_intel_encoder(dev, encoder) { 14207 intel_sanitize_encoder(encoder); 14208 } 14209 14210 for_each_pipe(dev_priv, pipe) { 14211 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 14212 intel_sanitize_crtc(crtc); 14213 intel_dump_pipe_config(crtc, crtc->config, 14214 "[setup_hw_state]"); 14215 } 14216 14217 intel_modeset_update_connector_atomic_state(dev); 14218 14219 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 14220 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 14221 14222 if (!pll->on || pll->active) 14223 continue; 14224 14225 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name); 14226 14227 pll->disable(dev_priv, pll); 14228 pll->on = false; 14229 } 14230 14231 if (IS_GEN9(dev)) 14232 skl_wm_get_hw_state(dev); 14233 else if (HAS_PCH_SPLIT(dev)) 14234 ilk_wm_get_hw_state(dev); 14235 14236 if (force_restore) { 14237 i915_redisable_vga(dev); 14238 14239 /* 14240 * We need to use raw interfaces for restoring state to avoid 14241 * checking (bogus) intermediate states. 14242 */ 14243 for_each_pipe(dev_priv, pipe) { 14244 struct drm_crtc *crtc = 14245 dev_priv->pipe_to_crtc_mapping[pipe]; 14246 14247 intel_crtc_restore_mode(crtc); 14248 } 14249 } else { 14250 intel_modeset_update_staged_output_state(dev); 14251 } 14252 14253 intel_modeset_check_state(dev); 14254 } 14255 14256 void intel_modeset_gem_init(struct drm_device *dev) 14257 { 14258 struct drm_i915_private *dev_priv = dev->dev_private; 14259 struct drm_crtc *c; 14260 struct drm_i915_gem_object *obj; 14261 int ret; 14262 14263 mutex_lock(&dev->struct_mutex); 14264 intel_init_gt_powersave(dev); 14265 mutex_unlock(&dev->struct_mutex); 14266 14267 /* 14268 * There may be no VBT; and if the BIOS enabled SSC we can 14269 * just keep using it to avoid unnecessary flicker. Whereas if the 14270 * BIOS isn't using it, don't assume it will work even if the VBT 14271 * indicates as much. 
14272 */ 14273 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 14274 dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & 14275 DREF_SSC1_ENABLE); 14276 14277 intel_modeset_init_hw(dev); 14278 14279 intel_setup_overlay(dev); 14280 14281 /* 14282 * Make sure any fbs we allocated at startup are properly 14283 * pinned & fenced. When we do the allocation it's too early 14284 * for this. 14285 */ 14286 for_each_crtc(dev, c) { 14287 obj = intel_fb_obj(c->primary->fb); 14288 if (obj == NULL) 14289 continue; 14290 14291 mutex_lock(&dev->struct_mutex); 14292 ret = intel_pin_and_fence_fb_obj(c->primary, 14293 c->primary->fb, 14294 c->primary->state, 14295 NULL); 14296 mutex_unlock(&dev->struct_mutex); 14297 if (ret) { 14298 DRM_ERROR("failed to pin boot fb on pipe %d\n", 14299 to_intel_crtc(c)->pipe); 14300 drm_framebuffer_unreference(c->primary->fb); 14301 c->primary->fb = NULL; 14302 update_state_fb(c->primary); 14303 } 14304 } 14305 14306 intel_backlight_register(dev); 14307 } 14308 14309 void intel_connector_unregister(struct intel_connector *intel_connector) 14310 { 14311 struct drm_connector *connector = &intel_connector->base; 14312 14313 intel_panel_destroy_backlight(connector); 14314 drm_connector_unregister(connector); 14315 } 14316 14317 void intel_modeset_cleanup(struct drm_device *dev) 14318 { 14319 struct drm_i915_private *dev_priv = dev->dev_private; 14320 struct drm_connector *connector; 14321 14322 intel_disable_gt_powersave(dev); 14323 14324 intel_backlight_unregister(dev); 14325 14326 /* 14327 * Interrupts and polling as the first thing to avoid creating havoc. 14328 * Too much stuff here (turning of connectors, ...) would 14329 * experience fancy races otherwise. 14330 */ 14331 intel_irq_uninstall(dev_priv); 14332 14333 /* 14334 * Due to the hpd irq storm handling the hotplug work can re-arm the 14335 * poll handlers. Hence disable polling after hpd handling is shut down. 
14336 */ 14337 drm_kms_helper_poll_fini(dev); 14338 14339 mutex_lock(&dev->struct_mutex); 14340 14341 intel_unregister_dsm_handler(); 14342 14343 intel_fbc_disable(dev); 14344 14345 mutex_unlock(&dev->struct_mutex); 14346 14347 /* flush any delayed tasks or pending work */ 14348 flush_scheduled_work(); 14349 14350 /* destroy the backlight and sysfs files before encoders/connectors */ 14351 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 14352 struct intel_connector *intel_connector; 14353 14354 intel_connector = to_intel_connector(connector); 14355 intel_connector->unregister(intel_connector); 14356 } 14357 14358 drm_mode_config_cleanup(dev); 14359 14360 intel_cleanup_overlay(dev); 14361 14362 mutex_lock(&dev->struct_mutex); 14363 intel_cleanup_gt_powersave(dev); 14364 mutex_unlock(&dev->struct_mutex); 14365 } 14366 14367 /* 14368 * Return which encoder is currently attached for connector. 14369 */ 14370 struct drm_encoder *intel_best_encoder(struct drm_connector *connector) 14371 { 14372 return &intel_attached_encoder(connector)->base; 14373 } 14374 14375 void intel_connector_attach_encoder(struct intel_connector *connector, 14376 struct intel_encoder *encoder) 14377 { 14378 connector->encoder = encoder; 14379 drm_mode_connector_attach_encoder(&connector->base, 14380 &encoder->base); 14381 } 14382 14383 /* 14384 * set vga decode state - true == enable VGA decode 14385 */ 14386 int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 14387 { 14388 struct drm_i915_private *dev_priv = dev->dev_private; 14389 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? 
		SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
	u16 gmch_ctrl;

	/* Read the GMCH control word from the bridge device; bail out if the
	 * PCI config access fails. */
	if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
		DRM_ERROR("failed to read control word\n");
		return -EIO;
	}

	/* Nothing to do if the VGA-disable bit already matches the requested
	 * state (state != 0 means "VGA enabled", i.e. disable bit clear). */
	if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
		return 0;

	if (state)
		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
	else
		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;

	/* Write the updated control word back; again propagate config-space
	 * failures as -EIO. */
	if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
		DRM_ERROR("failed to write control word\n");
		return -EIO;
	}

	return 0;
}

/*
 * Display hardware error-state capture/print support.
 *
 * NOTE(review): this whole section is compiled out with #if 0 in this port;
 * it mirrors the upstream i915 error-state dump facility.
 */
#if 0
/*
 * Snapshot of per-pipe/plane/cursor/transcoder display registers, taken at
 * GPU error time so it can be dumped later from the error-state file.
 */
struct intel_display_error_state {

	u32 power_well_driver;

	/* Number of valid entries in transcoder[] below. */
	int num_transcoders;

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		/* False when the pipe's power domain was off at capture time;
		 * the remaining register fields are then not filled in. */
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};

/*
 * Capture the current display register state into a freshly allocated
 * intel_display_error_state.  Returns NULL if there are no pipes or the
 * allocation fails.  Uses GFP_ATOMIC — presumably called from the GPU
 * error-capture path where sleeping is not allowed; TODO confirm caller
 * context.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Skip register reads for pipes whose power domain is off —
		 * touching them could fault or return garbage. */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* DSPSIZE/DSPPOS only exist on gen <= 3 hardware. */
		if (INTEL_INFO(dev)->gen <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		/* Surface base + tile offset registers appeared on gen 4. */
		if (INTEL_INFO(dev)->gen >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* One transcoder per pipe, plus the dedicated eDP transcoder on DDI
	 * platforms; transcoders[] above is ordered to match this count. */
	error->num_transcoders = INTEL_INFO(dev)->num_pipes;
	if (HAS_DDI(dev_priv->dev))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}

#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured display error state into the error
 * state buffer.  A NULL error is tolerated (no-op).  Entries whose power
 * domain was off at capture time still print their (zeroed) fields, with
 * the "Power: off" line flagging them as invalid.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   error->pipe[i].power_domain_on ? "on" : "off");
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Mirror the gen-conditional capture above so we only print
		 * fields that were actually read. */
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %c\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   error->transcoder[i].power_domain_on ? "on" : "off");
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}
#endif

/*
 * Called when a DRM file handle is closed: drop any pending page-flip
 * completion events owned by that file so they are never delivered to a
 * dead client.  The flip work itself is left in place — only the event is
 * freed and cleared, under the device event_lock (lockmgr is the
 * DragonFly BSD spinlock/lock primitive used by this port).
 */
void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc) {
		struct intel_unpin_work *work;

		lockmgr(&dev->event_lock, LK_EXCLUSIVE);

		work = crtc->unpin_work;

		/* Only events that belong to the closing file are reaped. */
		if (work && work->event &&
		    work->event->base.file_priv == file) {
			kfree(work->event);
			work->event = NULL;
		}

		lockmgr(&dev->event_lock, LK_RELEASE);
	}
}