1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2023 Intel Corporation 4 */ 5 6 #include "gt/intel_rps.h" 7 #include "i915_drv.h" 8 #include "i915_irq.h" 9 #include "i915_reg.h" 10 #include "icl_dsi_regs.h" 11 #include "intel_crtc.h" 12 #include "intel_de.h" 13 #include "intel_display_irq.h" 14 #include "intel_display_trace.h" 15 #include "intel_display_types.h" 16 #include "intel_dp_aux.h" 17 #include "intel_fdi_regs.h" 18 #include "intel_fifo_underrun.h" 19 #include "intel_gmbus.h" 20 #include "intel_hotplug_irq.h" 21 #include "intel_pmdemand.h" 22 #include "intel_psr.h" 23 #include "intel_psr_regs.h" 24 25 static void 26 intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) 27 { 28 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 29 30 drm_crtc_handle_vblank(&crtc->base); 31 } 32 33 /** 34 * ilk_update_display_irq - update DEIMR 35 * @dev_priv: driver private 36 * @interrupt_mask: mask of interrupt bits to update 37 * @enabled_irq_mask: mask of interrupt bits to enable 38 */ 39 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 40 u32 interrupt_mask, u32 enabled_irq_mask) 41 { 42 u32 new_val; 43 44 lockdep_assert_held(&dev_priv->irq_lock); 45 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 46 47 new_val = dev_priv->irq_mask; 48 new_val &= ~interrupt_mask; 49 new_val |= (~enabled_irq_mask & interrupt_mask); 50 51 if (new_val != dev_priv->irq_mask && 52 !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { 53 dev_priv->irq_mask = new_val; 54 intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); 55 intel_uncore_posting_read(&dev_priv->uncore, DEIMR); 56 } 57 } 58 59 void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) 60 { 61 ilk_update_display_irq(i915, bits, bits); 62 } 63 64 void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) 65 { 66 ilk_update_display_irq(i915, bits, 0); 67 } 68 69 /** 70 * bdw_update_port_irq - update DE port interrupt 71 * 
@dev_priv: driver private 72 * @interrupt_mask: mask of interrupt bits to update 73 * @enabled_irq_mask: mask of interrupt bits to enable 74 */ 75 void bdw_update_port_irq(struct drm_i915_private *dev_priv, 76 u32 interrupt_mask, u32 enabled_irq_mask) 77 { 78 u32 new_val; 79 u32 old_val; 80 81 lockdep_assert_held(&dev_priv->irq_lock); 82 83 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 84 85 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 86 return; 87 88 old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 89 90 new_val = old_val; 91 new_val &= ~interrupt_mask; 92 new_val |= (~enabled_irq_mask & interrupt_mask); 93 94 if (new_val != old_val) { 95 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); 96 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); 97 } 98 } 99 100 /** 101 * bdw_update_pipe_irq - update DE pipe interrupt 102 * @dev_priv: driver private 103 * @pipe: pipe whose interrupt to update 104 * @interrupt_mask: mask of interrupt bits to update 105 * @enabled_irq_mask: mask of interrupt bits to enable 106 */ 107 static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 108 enum pipe pipe, u32 interrupt_mask, 109 u32 enabled_irq_mask) 110 { 111 u32 new_val; 112 113 lockdep_assert_held(&dev_priv->irq_lock); 114 115 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 116 117 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 118 return; 119 120 new_val = dev_priv->de_irq_mask[pipe]; 121 new_val &= ~interrupt_mask; 122 new_val |= (~enabled_irq_mask & interrupt_mask); 123 124 if (new_val != dev_priv->de_irq_mask[pipe]) { 125 dev_priv->de_irq_mask[pipe] = new_val; 126 intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 127 intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); 128 } 129 } 130 131 void bdw_enable_pipe_irq(struct drm_i915_private *i915, 132 enum pipe pipe, u32 bits) 133 { 134 
bdw_update_pipe_irq(i915, pipe, bits, bits); 135 } 136 137 void bdw_disable_pipe_irq(struct drm_i915_private *i915, 138 enum pipe pipe, u32 bits) 139 { 140 bdw_update_pipe_irq(i915, pipe, bits, 0); 141 } 142 143 /** 144 * ibx_display_interrupt_update - update SDEIMR 145 * @dev_priv: driver private 146 * @interrupt_mask: mask of interrupt bits to update 147 * @enabled_irq_mask: mask of interrupt bits to enable 148 */ 149 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 150 u32 interrupt_mask, 151 u32 enabled_irq_mask) 152 { 153 u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); 154 155 sdeimr &= ~interrupt_mask; 156 sdeimr |= (~enabled_irq_mask & interrupt_mask); 157 158 drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); 159 160 lockdep_assert_held(&dev_priv->irq_lock); 161 162 if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) 163 return; 164 165 intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); 166 intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); 167 } 168 169 void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) 170 { 171 ibx_display_interrupt_update(i915, bits, bits); 172 } 173 174 void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) 175 { 176 ibx_display_interrupt_update(i915, bits, 0); 177 } 178 179 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 180 enum pipe pipe) 181 { 182 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 183 u32 enable_mask = status_mask << 16; 184 185 lockdep_assert_held(&dev_priv->irq_lock); 186 187 if (DISPLAY_VER(dev_priv) < 5) 188 goto out; 189 190 /* 191 * On pipe A we don't support the PSR interrupt yet, 192 * on pipe B and C the same bit MBZ. 193 */ 194 if (drm_WARN_ON_ONCE(&dev_priv->drm, 195 status_mask & PIPE_A_PSR_STATUS_VLV)) 196 return 0; 197 /* 198 * On pipe B and C we don't support the PSR interrupt yet, on pipe 199 * A the same bit is for perf counters which we don't use either. 
200 */ 201 if (drm_WARN_ON_ONCE(&dev_priv->drm, 202 status_mask & PIPE_B_PSR_STATUS_VLV)) 203 return 0; 204 205 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 206 SPRITE0_FLIP_DONE_INT_EN_VLV | 207 SPRITE1_FLIP_DONE_INT_EN_VLV); 208 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 209 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 210 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 211 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 212 213 out: 214 drm_WARN_ONCE(&dev_priv->drm, 215 enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 216 status_mask & ~PIPESTAT_INT_STATUS_MASK, 217 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 218 pipe_name(pipe), enable_mask, status_mask); 219 220 return enable_mask; 221 } 222 223 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 224 enum pipe pipe, u32 status_mask) 225 { 226 i915_reg_t reg = PIPESTAT(pipe); 227 u32 enable_mask; 228 229 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 230 "pipe %c: status_mask=0x%x\n", 231 pipe_name(pipe), status_mask); 232 233 lockdep_assert_held(&dev_priv->irq_lock); 234 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 235 236 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 237 return; 238 239 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 240 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 241 242 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 243 intel_uncore_posting_read(&dev_priv->uncore, reg); 244 } 245 246 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 247 enum pipe pipe, u32 status_mask) 248 { 249 i915_reg_t reg = PIPESTAT(pipe); 250 u32 enable_mask; 251 252 drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, 253 "pipe %c: status_mask=0x%x\n", 254 pipe_name(pipe), status_mask); 255 256 lockdep_assert_held(&dev_priv->irq_lock); 257 drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); 258 259 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 260 
return; 261 262 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 263 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 264 265 intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); 266 intel_uncore_posting_read(&dev_priv->uncore, reg); 267 } 268 269 static bool i915_has_asle(struct drm_i915_private *dev_priv) 270 { 271 if (!dev_priv->display.opregion.asle) 272 return false; 273 274 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); 275 } 276 277 /** 278 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 279 * @dev_priv: i915 device private 280 */ 281 void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 282 { 283 if (!i915_has_asle(dev_priv)) 284 return; 285 286 spin_lock_irq(&dev_priv->irq_lock); 287 288 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 289 if (DISPLAY_VER(dev_priv) >= 4) 290 i915_enable_pipestat(dev_priv, PIPE_A, 291 PIPE_LEGACY_BLC_EVENT_STATUS); 292 293 spin_unlock_irq(&dev_priv->irq_lock); 294 } 295 296 #if defined(CONFIG_DEBUG_FS) 297 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 298 enum pipe pipe, 299 u32 crc0, u32 crc1, 300 u32 crc2, u32 crc3, 301 u32 crc4) 302 { 303 struct intel_crtc *crtc = intel_crtc_for_pipe(dev_priv, pipe); 304 struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; 305 u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; 306 307 trace_intel_pipe_crc(crtc, crcs); 308 309 spin_lock(&pipe_crc->lock); 310 /* 311 * For some not yet identified reason, the first CRC is 312 * bonkers. So let's just wait for the next vblank and read 313 * out the buggy result. 314 * 315 * On GEN8+ sometimes the second CRC is bonkers as well, so 316 * don't trust that one either. 
317 */ 318 if (pipe_crc->skipped <= 0 || 319 (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 320 pipe_crc->skipped++; 321 spin_unlock(&pipe_crc->lock); 322 return; 323 } 324 spin_unlock(&pipe_crc->lock); 325 326 drm_crtc_add_crc_entry(&crtc->base, true, 327 drm_crtc_accurate_vblank_count(&crtc->base), 328 crcs); 329 } 330 #else 331 static inline void 332 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 333 enum pipe pipe, 334 u32 crc0, u32 crc1, 335 u32 crc2, u32 crc3, 336 u32 crc4) {} 337 #endif 338 339 static void flip_done_handler(struct drm_i915_private *i915, 340 enum pipe pipe) 341 { 342 struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe); 343 struct drm_crtc_state *crtc_state = crtc->base.state; 344 struct drm_pending_vblank_event *e = crtc_state->event; 345 struct drm_device *dev = &i915->drm; 346 unsigned long irqflags; 347 348 spin_lock_irqsave(&dev->event_lock, irqflags); 349 350 crtc_state->event = NULL; 351 352 drm_crtc_send_vblank_event(&crtc->base, e); 353 354 spin_unlock_irqrestore(&dev->event_lock, irqflags); 355 } 356 357 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 358 enum pipe pipe) 359 { 360 display_pipe_crc_irq_handler(dev_priv, pipe, 361 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 362 0, 0, 0, 0); 363 } 364 365 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 366 enum pipe pipe) 367 { 368 display_pipe_crc_irq_handler(dev_priv, pipe, 369 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 370 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), 371 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), 372 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), 373 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); 374 } 375 376 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 377 enum pipe pipe) 378 { 379 u32 res1, res2; 380 381 if (DISPLAY_VER(dev_priv) >= 3) 382 
res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); 383 else 384 res1 = 0; 385 386 if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) 387 res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); 388 else 389 res2 = 0; 390 391 display_pipe_crc_irq_handler(dev_priv, pipe, 392 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), 393 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), 394 intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), 395 res1, res2); 396 } 397 398 void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) 399 { 400 enum pipe pipe; 401 402 for_each_pipe(dev_priv, pipe) { 403 intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), 404 PIPESTAT_INT_STATUS_MASK | 405 PIPE_FIFO_UNDERRUN_STATUS); 406 407 dev_priv->pipestat_irq_mask[pipe] = 0; 408 } 409 } 410 411 void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, 412 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 413 { 414 enum pipe pipe; 415 416 spin_lock(&dev_priv->irq_lock); 417 418 if (!dev_priv->display_irqs_enabled) { 419 spin_unlock(&dev_priv->irq_lock); 420 return; 421 } 422 423 for_each_pipe(dev_priv, pipe) { 424 i915_reg_t reg; 425 u32 status_mask, enable_mask, iir_bit = 0; 426 427 /* 428 * PIPESTAT bits get signalled even when the interrupt is 429 * disabled with the mask bits, and some of the status bits do 430 * not generate interrupts at all (like the underrun bit). Hence 431 * we need to be careful that we only handle what we want to 432 * handle. 433 */ 434 435 /* fifo underruns are filterered in the underrun handler. 
*/ 436 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 437 438 switch (pipe) { 439 default: 440 case PIPE_A: 441 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 442 break; 443 case PIPE_B: 444 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 445 break; 446 case PIPE_C: 447 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 448 break; 449 } 450 if (iir & iir_bit) 451 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 452 453 if (!status_mask) 454 continue; 455 456 reg = PIPESTAT(pipe); 457 pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; 458 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 459 460 /* 461 * Clear the PIPE*STAT regs before the IIR 462 * 463 * Toggle the enable bits to make sure we get an 464 * edge in the ISR pipe event bit if we don't clear 465 * all the enabled status bits. Otherwise the edge 466 * triggered IIR on i965/g4x wouldn't notice that 467 * an interrupt is still pending. 468 */ 469 if (pipe_stats[pipe]) { 470 intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); 471 intel_uncore_write(&dev_priv->uncore, reg, enable_mask); 472 } 473 } 474 spin_unlock(&dev_priv->irq_lock); 475 } 476 477 void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 478 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 479 { 480 enum pipe pipe; 481 482 for_each_pipe(dev_priv, pipe) { 483 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 484 intel_handle_vblank(dev_priv, pipe); 485 486 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 487 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 488 489 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 490 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 491 } 492 } 493 494 void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 495 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 496 { 497 bool blc_event = false; 498 enum pipe pipe; 499 500 for_each_pipe(dev_priv, pipe) { 501 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 502 intel_handle_vblank(dev_priv, pipe); 503 504 if 
(pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 505 blc_event = true; 506 507 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 508 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 509 510 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 511 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 512 } 513 514 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 515 intel_opregion_asle_intr(dev_priv); 516 } 517 518 void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 519 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 520 { 521 bool blc_event = false; 522 enum pipe pipe; 523 524 for_each_pipe(dev_priv, pipe) { 525 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 526 intel_handle_vblank(dev_priv, pipe); 527 528 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 529 blc_event = true; 530 531 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 532 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 533 534 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 535 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 536 } 537 538 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 539 intel_opregion_asle_intr(dev_priv); 540 541 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 542 intel_gmbus_irq_handler(dev_priv); 543 } 544 545 void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 546 u32 pipe_stats[I915_MAX_PIPES]) 547 { 548 enum pipe pipe; 549 550 for_each_pipe(dev_priv, pipe) { 551 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 552 intel_handle_vblank(dev_priv, pipe); 553 554 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) 555 flip_done_handler(dev_priv, pipe); 556 557 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 558 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 559 560 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 561 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 562 } 563 564 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 565 intel_gmbus_irq_handler(dev_priv); 566 } 567 568 static void ibx_irq_handler(struct 
drm_i915_private *dev_priv, u32 pch_iir) 569 { 570 enum pipe pipe; 571 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 572 573 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 574 575 if (pch_iir & SDE_AUDIO_POWER_MASK) { 576 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 577 SDE_AUDIO_POWER_SHIFT); 578 drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", 579 port_name(port)); 580 } 581 582 if (pch_iir & SDE_AUX_MASK) 583 intel_dp_aux_irq_handler(dev_priv); 584 585 if (pch_iir & SDE_GMBUS) 586 intel_gmbus_irq_handler(dev_priv); 587 588 if (pch_iir & SDE_AUDIO_HDCP_MASK) 589 drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); 590 591 if (pch_iir & SDE_AUDIO_TRANS_MASK) 592 drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); 593 594 if (pch_iir & SDE_POISON) 595 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 596 597 if (pch_iir & SDE_FDI_MASK) { 598 for_each_pipe(dev_priv, pipe) 599 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 600 pipe_name(pipe), 601 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 602 } 603 604 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 605 drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); 606 607 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 608 drm_dbg(&dev_priv->drm, 609 "PCH transcoder CRC error interrupt\n"); 610 611 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 612 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 613 614 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 615 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 616 } 617 618 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 619 { 620 u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); 621 enum pipe pipe; 622 623 if (err_int & ERR_INT_POISON) 624 drm_err(&dev_priv->drm, "Poison interrupt\n"); 625 626 for_each_pipe(dev_priv, pipe) { 627 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 628 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 629 630 if (err_int & 
ERR_INT_PIPE_CRC_DONE(pipe)) { 631 if (IS_IVYBRIDGE(dev_priv)) 632 ivb_pipe_crc_irq_handler(dev_priv, pipe); 633 else 634 hsw_pipe_crc_irq_handler(dev_priv, pipe); 635 } 636 } 637 638 intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); 639 } 640 641 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 642 { 643 u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); 644 enum pipe pipe; 645 646 if (serr_int & SERR_INT_POISON) 647 drm_err(&dev_priv->drm, "PCH poison interrupt\n"); 648 649 for_each_pipe(dev_priv, pipe) 650 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 651 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 652 653 intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); 654 } 655 656 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 657 { 658 enum pipe pipe; 659 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 660 661 ibx_hpd_irq_handler(dev_priv, hotplug_trigger); 662 663 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 664 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 665 SDE_AUDIO_POWER_SHIFT_CPT); 666 drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", 667 port_name(port)); 668 } 669 670 if (pch_iir & SDE_AUX_MASK_CPT) 671 intel_dp_aux_irq_handler(dev_priv); 672 673 if (pch_iir & SDE_GMBUS_CPT) 674 intel_gmbus_irq_handler(dev_priv); 675 676 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 677 drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); 678 679 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 680 drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); 681 682 if (pch_iir & SDE_FDI_MASK_CPT) { 683 for_each_pipe(dev_priv, pipe) 684 drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", 685 pipe_name(pipe), 686 intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); 687 } 688 689 if (pch_iir & SDE_ERROR_CPT) 690 cpt_serr_int_handler(dev_priv); 691 } 692 693 void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) 694 { 695 enum pipe pipe; 696 u32 hotplug_trigger 
= de_iir & DE_DP_A_HOTPLUG; 697 698 if (hotplug_trigger) 699 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 700 701 if (de_iir & DE_AUX_CHANNEL_A) 702 intel_dp_aux_irq_handler(dev_priv); 703 704 if (de_iir & DE_GSE) 705 intel_opregion_asle_intr(dev_priv); 706 707 if (de_iir & DE_POISON) 708 drm_err(&dev_priv->drm, "Poison interrupt\n"); 709 710 for_each_pipe(dev_priv, pipe) { 711 if (de_iir & DE_PIPE_VBLANK(pipe)) 712 intel_handle_vblank(dev_priv, pipe); 713 714 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) 715 flip_done_handler(dev_priv, pipe); 716 717 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 718 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 719 720 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 721 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 722 } 723 724 /* check event from PCH */ 725 if (de_iir & DE_PCH_EVENT) { 726 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 727 728 if (HAS_PCH_CPT(dev_priv)) 729 cpt_irq_handler(dev_priv, pch_iir); 730 else 731 ibx_irq_handler(dev_priv, pch_iir); 732 733 /* should clear PCH hotplug event before clear CPU irq */ 734 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 735 } 736 737 if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) 738 gen5_rps_irq_handler(&to_gt(dev_priv)->rps); 739 } 740 741 void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) 742 { 743 enum pipe pipe; 744 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 745 746 if (hotplug_trigger) 747 ilk_hpd_irq_handler(dev_priv, hotplug_trigger); 748 749 if (de_iir & DE_ERR_INT_IVB) 750 ivb_err_int_handler(dev_priv); 751 752 if (de_iir & DE_EDP_PSR_INT_HSW) { 753 struct intel_encoder *encoder; 754 755 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 756 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 757 u32 psr_iir; 758 759 psr_iir = intel_uncore_rmw(&dev_priv->uncore, 760 EDP_PSR_IIR, 0, 0); 761 intel_psr_irq_handler(intel_dp, psr_iir); 762 break; 763 } 764 } 765 766 if (de_iir & DE_AUX_CHANNEL_A_IVB) 767 
intel_dp_aux_irq_handler(dev_priv); 768 769 if (de_iir & DE_GSE_IVB) 770 intel_opregion_asle_intr(dev_priv); 771 772 for_each_pipe(dev_priv, pipe) { 773 if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) 774 intel_handle_vblank(dev_priv, pipe); 775 776 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) 777 flip_done_handler(dev_priv, pipe); 778 } 779 780 /* check event from PCH */ 781 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 782 u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); 783 784 cpt_irq_handler(dev_priv, pch_iir); 785 786 /* clear PCH hotplug event before clear CPU irq */ 787 intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); 788 } 789 } 790 791 static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) 792 { 793 u32 mask; 794 795 if (DISPLAY_VER(dev_priv) >= 20) 796 return 0; 797 else if (DISPLAY_VER(dev_priv) >= 14) 798 return TGL_DE_PORT_AUX_DDIA | 799 TGL_DE_PORT_AUX_DDIB; 800 else if (DISPLAY_VER(dev_priv) >= 13) 801 return TGL_DE_PORT_AUX_DDIA | 802 TGL_DE_PORT_AUX_DDIB | 803 TGL_DE_PORT_AUX_DDIC | 804 XELPD_DE_PORT_AUX_DDID | 805 XELPD_DE_PORT_AUX_DDIE | 806 TGL_DE_PORT_AUX_USBC1 | 807 TGL_DE_PORT_AUX_USBC2 | 808 TGL_DE_PORT_AUX_USBC3 | 809 TGL_DE_PORT_AUX_USBC4; 810 else if (DISPLAY_VER(dev_priv) >= 12) 811 return TGL_DE_PORT_AUX_DDIA | 812 TGL_DE_PORT_AUX_DDIB | 813 TGL_DE_PORT_AUX_DDIC | 814 TGL_DE_PORT_AUX_USBC1 | 815 TGL_DE_PORT_AUX_USBC2 | 816 TGL_DE_PORT_AUX_USBC3 | 817 TGL_DE_PORT_AUX_USBC4 | 818 TGL_DE_PORT_AUX_USBC5 | 819 TGL_DE_PORT_AUX_USBC6; 820 821 mask = GEN8_AUX_CHANNEL_A; 822 if (DISPLAY_VER(dev_priv) >= 9) 823 mask |= GEN9_AUX_CHANNEL_B | 824 GEN9_AUX_CHANNEL_C | 825 GEN9_AUX_CHANNEL_D; 826 827 if (DISPLAY_VER(dev_priv) == 11) { 828 mask |= ICL_AUX_CHANNEL_F; 829 mask |= ICL_AUX_CHANNEL_E; 830 } 831 832 return mask; 833 } 834 835 static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) 836 { 837 if (DISPLAY_VER(dev_priv) >= 13 || HAS_D12_PLANE_MINIMIZATION(dev_priv)) 838 return 
RKL_DE_PIPE_IRQ_FAULT_ERRORS; 839 else if (DISPLAY_VER(dev_priv) >= 11) 840 return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; 841 else if (DISPLAY_VER(dev_priv) >= 9) 842 return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 843 else 844 return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 845 } 846 847 static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) 848 { 849 wake_up_all(&dev_priv->display.pmdemand.waitqueue); 850 } 851 852 static void 853 gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) 854 { 855 bool found = false; 856 857 if (DISPLAY_VER(dev_priv) >= 14) { 858 if (iir & (XELPDP_PMDEMAND_RSP | 859 XELPDP_PMDEMAND_RSPTOUT_ERR)) { 860 if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR) 861 drm_dbg(&dev_priv->drm, 862 "Error waiting for Punit PM Demand Response\n"); 863 864 intel_pmdemand_irq_handler(dev_priv); 865 found = true; 866 } 867 } else if (iir & GEN8_DE_MISC_GSE) { 868 intel_opregion_asle_intr(dev_priv); 869 found = true; 870 } 871 872 if (iir & GEN8_DE_EDP_PSR) { 873 struct intel_encoder *encoder; 874 u32 psr_iir; 875 i915_reg_t iir_reg; 876 877 for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { 878 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 879 880 if (DISPLAY_VER(dev_priv) >= 12) 881 iir_reg = TRANS_PSR_IIR(intel_dp->psr.transcoder); 882 else 883 iir_reg = EDP_PSR_IIR; 884 885 psr_iir = intel_uncore_rmw(&dev_priv->uncore, iir_reg, 0, 0); 886 887 if (psr_iir) 888 found = true; 889 890 intel_psr_irq_handler(intel_dp, psr_iir); 891 892 /* prior GEN12 only have one EDP PSR */ 893 if (DISPLAY_VER(dev_priv) < 12) 894 break; 895 } 896 } 897 898 if (!found) 899 drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n"); 900 } 901 902 static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, 903 u32 te_trigger) 904 { 905 enum pipe pipe = INVALID_PIPE; 906 enum transcoder dsi_trans; 907 enum port port; 908 u32 val; 909 910 /* 911 * Incase of dual link, TE comes from DSI_1 912 * this is to check if dual link is enabled 913 */ 914 val 
= intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); 915 val &= PORT_SYNC_MODE_ENABLE; 916 917 /* 918 * if dual link is enabled, then read DSI_0 919 * transcoder registers 920 */ 921 port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ? 922 PORT_A : PORT_B; 923 dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1; 924 925 /* Check if DSI configured in command mode */ 926 val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); 927 val = val & OP_MODE_MASK; 928 929 if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { 930 drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); 931 return; 932 } 933 934 /* Get PIPE for handling VBLANK event */ 935 val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); 936 switch (val & TRANS_DDI_EDP_INPUT_MASK) { 937 case TRANS_DDI_EDP_INPUT_A_ON: 938 pipe = PIPE_A; 939 break; 940 case TRANS_DDI_EDP_INPUT_B_ONOFF: 941 pipe = PIPE_B; 942 break; 943 case TRANS_DDI_EDP_INPUT_C_ONOFF: 944 pipe = PIPE_C; 945 break; 946 default: 947 drm_err(&dev_priv->drm, "Invalid PIPE\n"); 948 return; 949 } 950 951 intel_handle_vblank(dev_priv, pipe); 952 953 /* clear TE in dsi IIR */ 954 port = (te_trigger & DSI1_TE) ? 
		PORT_B : PORT_A;
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);
}

/* IIR bit signalling primary plane flip completion on this platform. */
static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915)
{
	if (DISPLAY_VER(i915) >= 9)
		return GEN9_PIPE_PLANE1_FLIP_DONE;
	else
		return GEN8_PIPE_PRIMARY_FLIP_DONE;
}

/* Set of pipe IIR bits that indicate a FIFO underrun on this platform. */
u32 gen8_de_pipe_underrun_mask(struct drm_i915_private *dev_priv)
{
	u32 mask = GEN8_PIPE_FIFO_UNDERRUN;

	if (DISPLAY_VER(dev_priv) >= 13)
		mask |= XELPD_PIPE_SOFT_UNDERRUN |
			XELPD_PIPE_HARD_UNDERRUN;

	return mask;
}

/*
 * Read and acknowledge SDEIIR, and — when the SDE_PICAINTERRUPT flag is set
 * (MTP+ PCH) — the PICA IIR as well. Both raw IIR values are returned to the
 * caller via @pch_iir / @pica_iir for dispatch.
 */
static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir)
{
	u32 pica_ier = 0;

	*pica_iir = 0;
	*pch_iir = intel_de_read(i915, SDEIIR);
	if (!*pch_iir)
		return;

	/*
	 * PICA IER must be disabled/re-enabled around clearing PICA IIR and
	 * SDEIIR, to avoid losing PICA IRQs and to ensure that such IRQs set
	 * their flags both in the PICA and SDE IIR.
	 */
	if (*pch_iir & SDE_PICAINTERRUPT) {
		drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTP);

		pica_ier = intel_de_rmw(i915, PICAINTERRUPT_IER, ~0, 0);
		*pica_iir = intel_de_read(i915, PICAINTERRUPT_IIR);
		intel_de_write(i915, PICAINTERRUPT_IIR, *pica_iir);
	}

	intel_de_write(i915, SDEIIR, *pch_iir);

	if (pica_ier)
		intel_de_write(i915, PICAINTERRUPT_IER, pica_ier);
}

/*
 * Dispatch the display engine interrupts indicated by @master_ctl: DE misc,
 * gen11+ HPD, DE port (AUX, hotplug, GMBUS, DSI TE), per-pipe events
 * (vblank, flip done, CRC, underrun, faults) and PCH/SDE interrupts.
 * Each IIR is read and acked before its individual handler runs.
 */
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
	u32 iir;
	enum pipe pipe;

	drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv));

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir);
			gen8_de_misc_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE MISC)!\n");
		}
	}

	if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR);
		if (iir) {
			intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir);
			gen11_hpd_irq_handler(dev_priv, iir);
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied, (DE HPD)!\n");
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR);
		if (iir) {
			bool found = false;

			intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);

			if (iir & gen8_de_port_aux_mask(dev_priv)) {
				intel_dp_aux_irq_handler(dev_priv);
				found = true;
			}

			if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
				u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			} else if (IS_BROADWELL(dev_priv)) {
				u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;

				if (hotplug_trigger) {
					ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
					found = true;
				}
			}

			if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) &&
			    (iir & BXT_DE_PORT_GMBUS)) {
				intel_gmbus_irq_handler(dev_priv);
				found = true;
			}

			if (DISPLAY_VER(dev_priv) >= 11) {
				u32 te_trigger = iir & (DSI0_TE | DSI1_TE);

				if (te_trigger) {
					gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
					found = true;
				}
			}

			/* IIR had bits set that none of the handlers above claimed */
			if (!found)
				drm_err_ratelimited(&dev_priv->drm,
						    "Unexpected DE Port interrupt\n");
		} else {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PORT)!\n");
		}
	}

	for_each_pipe(dev_priv, pipe) {
		u32 fault_errors;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		iir = intel_uncore_read(&dev_priv->uncore,
					GEN8_DE_PIPE_IIR(pipe));
		if (!iir) {
			drm_err_ratelimited(&dev_priv->drm,
					    "The master control interrupt lied (DE PIPE)!\n");
			continue;
		}

		intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir);

		if (iir & GEN8_PIPE_VBLANK)
			intel_handle_vblank(dev_priv, pipe);

		if (iir & gen8_de_pipe_flip_done_mask(dev_priv))
			flip_done_handler(dev_priv, pipe);

		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev_priv, pipe);

		if (iir & gen8_de_pipe_underrun_mask(dev_priv))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
		if (fault_errors)
			drm_err_ratelimited(&dev_priv->drm,
					    "Fault errors on pipe %c: 0x%08x\n",
					    pipe_name(pipe),
					    fault_errors);
	}

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		u32 pica_iir;

		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir);
		if (iir) {
			if (pica_iir)
				xelpdp_pica_irq_handler(dev_priv, pica_iir);

			if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
				icp_irq_handler(dev_priv, iir);
			else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
				spt_irq_handler(dev_priv, iir);
			else
				cpt_irq_handler(dev_priv, iir);
		} else {
			/*
			 * Like on previous PCH there seems to be something
			 * fishy going on with forwarding PCH interrupts.
			 */
			drm_dbg(&dev_priv->drm,
				"The master control interrupt lied (SDE)!\n");
		}
	}
}

/*
 * Read and ack GEN11_GU_MISC_IIR if the master control register reports a
 * GU MISC interrupt; returns the raw IIR value (0 when none was pending).
 */
u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl)
{
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	u32 iir;

	if (!(master_ctl & GEN11_GU_MISC_IRQ))
		return 0;

	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
	if (likely(iir))
		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);

	return iir;
}

/* GSE (OpRegion ASLE) is the only GU MISC event handled here. */
void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir)
{
	if (iir & GEN11_GU_MISC_GSE)
		intel_opregion_asle_intr(i915);
}

/*
 * Top-level gen11+ display interrupt entry point: read and mask
 * GEN11_DISPLAY_INT_CTL, run the gen8 DE handler, then re-enable.
 */
void gen11_display_irq_handler(struct drm_i915_private *i915)
{
	void __iomem * const regs = intel_uncore_regs(&i915->uncore);
	const u32 disp_ctl = raw_reg_read(regs, GEN11_DISPLAY_INT_CTL);

	disable_rpm_wakeref_asserts(&i915->runtime_pm);
	/*
	 * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ
	 * for the display related bits.
	 */
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL, 0x0);
	gen8_de_irq_handler(i915, disp_ctl);
	raw_reg_write(regs, GEN11_DISPLAY_INT_CTL,
		      GEN11_DISPLAY_IRQ_ENABLE);

	enable_rpm_wakeref_asserts(&i915->runtime_pm);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i8xx_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int i915gm_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	/*
	 * Vblank interrupts fail to wake the device up from C2+.
	 * Disabling render clock gating during C-states avoids
	 * the problem. There is a small power cost so we do this
	 * only when vblank interrupts are actually enabled.
	 */
	if (dev_priv->vblank_enabled++ == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));

	return i8xx_enable_vblank(crtc);
}

/* gen4: uses the START_VBLANK pipestat bit rather than plain VBLANK. */
int i965_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

int ilk_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	/* IVB+ uses per-pipe DEIMR bits at different positions than ILK/SNB */
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even though there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(crtc);

	return 0;
}

/*
 * Enable/disable the DSI TE (tearing effect) event for crtcs that use TE
 * signalling (I915_MODE_FLAG_DSI_USE_TE0/TE1). Returns false when the crtc
 * does not use TE, in which case the caller falls back to the regular pipe
 * vblank interrupt (see bdw_enable_vblank()/bdw_disable_vblank()).
 */
static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
				   bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	enum port port;

	if (!(intel_crtc->mode_flags &
	      (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
		return false;

	/* for dual link cases we consider TE from slave */
	if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
		port = PORT_B;
	else
		port = PORT_A;

	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_MASK_REG(port), DSI_TE_EVENT,
			 enable ?
			 0 : DSI_TE_EVENT);

	/* clear any pending TE event */
	intel_uncore_rmw(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), 0, 0);

	return true;
}

int bdw_enable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	/* crtcs using DSI TE events don't need the pipe vblank interrupt */
	if (gen11_dsi_configure_te(crtc, true))
		return 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	/* Even if there is no DMC, frame counter can get stuck when
	 * PSR is active as no frames are generated, so check only for PSR.
	 */
	if (HAS_PSR(dev_priv))
		drm_crtc_vblank_restore(&crtc->base);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i8xx_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void i915gm_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);

	i8xx_disable_vblank(crtc);

	/* re-enable render clock gating once the last vblank user is gone */
	if (--dev_priv->vblank_enabled == 0)
		intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}

void i965_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void ilk_disable_vblank(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	enum pipe pipe = to_intel_crtc(crtc)->pipe;
	unsigned long irqflags;
	/* IVB+ uses per-pipe DEIMR bits at different positions than ILK/SNB */
	u32 bit = DISPLAY_VER(dev_priv) >= 7 ?
		DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ilk_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

void bdw_disable_vblank(struct drm_crtc *_crtc)
{
	struct intel_crtc *crtc = to_intel_crtc(_crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	unsigned long irqflags;

	/* crtcs using DSI TE events don't use the pipe vblank interrupt */
	if (gen11_dsi_configure_te(crtc, false))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/*
 * Reset all VLV/CHV display interrupt state: DPINVGTT status, hotplug,
 * pipestat, and the VLV_ IER/IMR/IIR triplet. irq_mask is parked at ~0u,
 * which vlv_display_irq_postinstall() asserts on.
 */
void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	if (IS_CHERRYVIEW(dev_priv))
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
	else
		intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK_VLV);

	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
	/* write back the hotplug status to clear it */
	intel_uncore_rmw(uncore, PORT_HOTPLUG_STAT, 0, 0);

	i9xx_pipestat_irq_reset(dev_priv);

	GEN3_IRQ_RESET(uncore, VLV_);
	dev_priv->irq_mask = ~0u;
}

/*
 * (Re)enable the VLV/CHV display interrupts: GMBUS and per-pipe CRC
 * pipestats, plus the pipe event / LPE / port bits in VLV_IER.
 */
void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 pipestat_mask;
	u32 enable_mask;
	enum pipe pipe;

	pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A,
			     PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_LPE_PIPE_A_INTERRUPT |
		I915_LPE_PIPE_B_INTERRUPT;

	if (IS_CHERRYVIEW(dev_priv))
		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
			I915_LPE_PIPE_C_INTERRUPT;

	/* vlv_display_irq_reset() must have run first */
	drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);

	dev_priv->irq_mask = ~enable_mask;

	GEN3_IRQ_INIT(uncore, VLV_, dev_priv->irq_mask, enable_mask);
}

/*
 * Reset the gen8 display interrupt registers: PSR, the per-pipe triplets
 * (only for pipes whose power well is on), DE port and DE misc.
 */
void gen8_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
	intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);
}

/*
 * Reset the gen11+ display interrupt registers: master display int ctl,
 * PSR (per-transcoder on gen12+), per-pipe, DE port/misc, HPD (PICA on
 * ver14+) and, for ICP+ PCHs, the SDE triplet.
 */
void gen11_display_irq_reset(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);

	if (!HAS_DISPLAY(dev_priv))
		return;

	intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0);

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			intel_uncore_write(uncore, TRANS_PSR_IMR(trans),
					   0xffffffff);
			intel_uncore_write(uncore, TRANS_PSR_IIR(trans),
					   0xffffffff);
		}
	} else {
		intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff);
		intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff);
	}

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	GEN3_IRQ_RESET(uncore, GEN8_DE_PORT_);
	GEN3_IRQ_RESET(uncore, GEN8_DE_MISC_);

	if (DISPLAY_VER(dev_priv) >= 14)
		GEN3_IRQ_RESET(uncore, PICAINTERRUPT_);
	else
		GEN3_IRQ_RESET(uncore, GEN11_DE_HPD_);

	if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		GEN3_IRQ_RESET(uncore, SDE);
}

/*
 * Re-arm the per-pipe interrupt registers for the pipes in @pipe_mask after
 * their power well came back up (register contents were lost).
 */
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 extra_ier = GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
				  dev_priv->de_irq_mask[pipe],
				  ~dev_priv->de_irq_mask[pipe] | extra_ier);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * Quiesce the per-pipe interrupt registers for the pipes in @pipe_mask
 * before their power well goes down.
 */
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
				     u8 pipe_mask)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	enum pipe pipe;

	spin_lock_irq(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv)) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
		GEN8_IRQ_RESET_NDX(uncore, DE_PIPE, pipe);

	spin_unlock_irq(&dev_priv->irq_lock);

	/* make sure we're done processing display irqs */
	intel_synchronize_irq(dev_priv);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * Note that we currently do this after installing the interrupt handler,
 * but before we enable the master interrupt. That should be sufficient
 * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 * interrupts could still race.
 */
static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask;

	if (HAS_PCH_NOP(dev_priv))
		return;

	if (HAS_PCH_IBX(dev_priv))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv))
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
	else
		mask = SDE_GMBUS_CPT;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

/*
 * VLV/CHV runtime-pm hook: mark display irqs enabled and, if the driver
 * interrupts are already installed, reset + postinstall the VLV block.
 */
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv)) {
		vlv_display_irq_reset(dev_priv);
		vlv_display_irq_postinstall(dev_priv);
	}
}

/* VLV/CHV runtime-pm hook: counterpart of valleyview_enable_display_irqs(). */
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		vlv_display_irq_reset(dev_priv);
}

/*
 * Program DEIMR/DEIER (and SDE via ibx_irq_postinstall()) for ILK..HSW:
 * display_mask is unmasked in DEIMR, extra_mask is only enabled in DEIER
 * (unmasked later on demand, e.g. vblank).
 */
void ilk_de_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 display_mask, extra_mask;

	if (GRAPHICS_VER(i915) >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_C) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_B) |
			      DE_PLANE_FLIP_DONE_IVB(PLANE_A) |
			      DE_DP_A_HOTPLUG_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
				DE_PIPEA_CRC_DONE | DE_POISON);
		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
			      DE_PLANE_FLIP_DONE(PLANE_A) |
			      DE_PLANE_FLIP_DONE(PLANE_B) |
			      DE_DP_A_HOTPLUG);
	}

	if (IS_HASWELL(i915)) {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
		display_mask |= DE_EDP_PSR_INT_HSW;
	}

	if (IS_IRONLAKE_M(i915))
		extra_mask |= DE_PCU_EVENT;

	i915->irq_mask = ~display_mask;

	ibx_irq_postinstall(i915);

	GEN3_IRQ_INIT(uncore, DE, i915->irq_mask,
		      display_mask | extra_mask);
}

static void mtp_irq_postinstall(struct drm_i915_private *i915);
static void icp_irq_postinstall(struct drm_i915_private *i915);

/*
 * Program the gen8+ display interrupt registers: PCH/SDE first, then DE
 * misc/port, per-pipe (power-well permitting) and, on ver11-13, DE HPD.
 */
void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;

	u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) |
		GEN8_PIPE_CDCLK_CRC_DONE;
	u32 de_pipe_enables;
	u32 de_port_masked = gen8_de_port_aux_mask(dev_priv);
	u32 de_port_enables;
	u32 de_misc_masked = GEN8_DE_EDP_PSR;
	u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) |
		BIT(TRANSCODER_C) | BIT(TRANSCODER_D);
	enum pipe pipe;

	if (!HAS_DISPLAY(dev_priv))
		return;

	if (DISPLAY_VER(dev_priv) >= 14)
		mtp_irq_postinstall(dev_priv);
	else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
		icp_irq_postinstall(dev_priv);
	else if (HAS_PCH_SPLIT(dev_priv))
		ibx_irq_postinstall(dev_priv);

	if (DISPLAY_VER(dev_priv) <= 10)
		de_misc_masked |= GEN8_DE_MISC_GSE;

	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_masked |= BXT_DE_PORT_GMBUS;

	if (DISPLAY_VER(dev_priv) >= 14) {
		de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR |
				  XELPDP_PMDEMAND_RSP;
	} else if (DISPLAY_VER(dev_priv) >= 11) {
		enum port port;

		if (intel_bios_is_dsi_present(dev_priv, &port))
			de_port_masked |= DSI0_TE | DSI1_TE;
	}

	de_pipe_enables = de_pipe_masked |
		GEN8_PIPE_VBLANK |
		gen8_de_pipe_underrun_mask(dev_priv) |
		gen8_de_pipe_flip_done_mask(dev_priv);

	de_port_enables = de_port_masked;
	if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
	else if (IS_BROADWELL(dev_priv))
		de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;

	if (DISPLAY_VER(dev_priv) >= 12) {
		enum transcoder trans;

		for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) {
			enum intel_display_power_domain domain;

			domain = POWER_DOMAIN_TRANSCODER(trans);
			if (!intel_display_power_is_enabled(dev_priv, domain))
				continue;

			gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans));
		}
	} else {
		gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR);
	}

	for_each_pipe(dev_priv, pipe) {
		dev_priv->de_irq_mask[pipe] = ~de_pipe_masked;

		/* powered-down pipes are programmed later by the power well hooks */
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(uncore, DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);
	}

	GEN3_IRQ_INIT(uncore, GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
	GEN3_IRQ_INIT(uncore, GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);

	if (IS_DISPLAY_VER(dev_priv, 11, 13)) {
		u32 de_hpd_masked = 0;
		u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK |
				     GEN11_DE_TBT_HOTPLUG_MASK;

		GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
			      de_hpd_enables);
	}
}

/* MTP (ver14+) PCH/PICA interrupt postinstall: PICA HPD + SDE. */
static void mtp_irq_postinstall(struct drm_i915_private *i915)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT;
	u32 de_hpd_mask = XELPDP_AUX_TC_MASK;
	u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK |
			     XELPDP_TBT_HOTPLUG_MASK;

	GEN3_IRQ_INIT(uncore, PICAINTERRUPT_, ~de_hpd_mask,
		      de_hpd_enables);

	GEN3_IRQ_INIT(uncore, SDE, ~sde_mask, 0xffffffff);
}

/* ICP+ PCH interrupt postinstall: only GMBUS unmasked, all sources enabled. */
static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
	struct intel_uncore *uncore = &dev_priv->uncore;
	u32 mask = SDE_GMBUS_ICP;

	GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
}

/* gen11: program the DE registers, then enable the display master irq. */
void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	if (!HAS_DISPLAY(dev_priv))
		return;

	gen8_de_irq_postinstall(dev_priv);

	intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/* DG1: same sequence as gen11_de_irq_postinstall(). */
void dg1_de_irq_postinstall(struct drm_i915_private *i915)
{
	if (!HAS_DISPLAY(i915))
		return;

	gen8_de_irq_postinstall(i915);
	intel_uncore_write(&i915->uncore, GEN11_DISPLAY_INT_CTL,
			   GEN11_DISPLAY_IRQ_ENABLE);
}

/* One-time display irq init: vblank policy, irq-enable policy, HPD init. */
void intel_display_irq_init(struct drm_i915_private *i915)
{
	i915->drm.vblank_disable_immediate = true;

	/*
	 * Most platforms treat the display irq block as an always-on power
	 * domain. vlv/chv can disable it at runtime and need special care to
	 * avoid writing any of the display block registers outside of the power
	 * domain. We defer setting up the display irqs in this case to the
	 * runtime pm.
	 */
	i915->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
		i915->display_irqs_enabled = false;

	intel_hotplug_irq_init(i915);
}