/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
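
/*
 * Note: on GEN8+ the PM interrupt bits live in the GT interrupt register
 * block at index 2, so the three helpers below pick between
 * GEN8_GT_IIR/IMR/IER(2) and the dedicated GEN6_PMIIR/PMIMR/PMIER
 * registers based on the hardware generation.
 */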
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
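
/*
 * Illustrative example (not from the original code): with
 * interrupt_mask = 0x3 and enabled_irq_mask = 0x1, the update rule used
 * by snb_update_pm_irq() and ilk_update_gt_irq() above clears bit 0 in
 * the IMR shadow (unmasking that interrupt) and sets bit 1 (masking it),
 * while leaving all other bits untouched:
 *
 *	new_val = (old & ~0x3) | (~0x1 & 0x3);	// bit 1 masked, bit 0 live
 */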
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->rps.interrupts_enabled = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	cancel_work_sync(&dev_priv->rps.work);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Wait for pending IRQ handlers to complete (on other CPUs) */
#if 0
	synchronize_irq(dev->irq);
#endif
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
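
/*
 * Layout note (illustrative): in the PIPESTAT registers the enable bits
 * occupy the high half and the corresponding status bits the low half,
 * which is why the callers below can usually derive the enable mask by
 * simply shifting the status mask: enable_mask = status_mask << 16.
 * VLV additionally needs the fixups in vlv_get_pipestat_enable_mask().
 */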
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
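
/*
 * Worked example for the "cooked" vblank count above (illustrative
 * numbers, not from the original code): with htotal = 100,
 * hsync_start = 90 and vblank_start = 40, vbl_start becomes
 * 40 * 100 - (100 - 90) = 3990 pixels. If the hardware pixel counter
 * reads 4000 we are already past the start-of-vblank point, so one is
 * added to the frame count read from PIPEFRAME; if it reads 3000 we
 * are still inside the frame the counter already accounts for and
 * nothing is added.
 */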
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int position;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	position = __intel_get_crtc_scanline(crtc);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->state->enable) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
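
/*
 * Process long/short HPD pulses for digital ports outside the hard irq
 * handler. The per-port masks are snapshotted and cleared under
 * irq_lock; if a port's ->hpd_pulse() callback asks for it (returns
 * true), the event is fed back into the legacy hotplug path via
 * hpd_event_bits and the hotplug work is scheduled.
 */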
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		dev_priv->hpd_event_bits |= old_bits;
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);

	return;
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
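
/*
 * Rough sketch of the check below (illustrative): threshold is a
 * percentage, so the comparison is effectively
 *
 *	(render_c0 + media_c0) * 100 >= elapsed_cz_time * threshold
 *
 * with both sides scaled into comparable units via mem_freq and
 * VLV_CZ_CLOCK_TO_MILLI_SEC. If the combined render+media C0 residency
 * over the evaluation interval exceeds the threshold percentage, the
 * function returns true.
 */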
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}
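
/*
 * Deferred RPS work: picks up the PM IIR bits stashed by
 * gen6_rps_irq_handler(), decides a new frequency based on up/down
 * threshold events (doubling the previous adjustment while the trend
 * continues), clamps it to the soft limits and hands it to
 * intel_set_rps(). Runs from the driver workqueue since the actual
 * frequency change needs forcewake and may sleep.
 */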
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}
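
/*
 * GEN6+ GT interrupt dispatch: each engine (render, BSD, blitter) has
 * its own user-interrupt bit in GTIIR; waiters on the corresponding
 * ring are woken via notify_ring(), and command parser/parity errors
 * are routed to their handlers.
 */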
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
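
/*
 * Main HPD irq handler: decodes which pins fired, separates long vs.
 * short pulses for digital ports, and counts interrupts per pin for
 * storm detection. A pin that triggers more than HPD_STORM_THRESHOLD
 * times within HPD_STORM_DETECT_PERIOD ms is marked disabled, and the
 * hotplug work function then moves the connector over to polling.
 */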
static void intel_hpd_irq_handler(struct drm_device *dev,
				  u32 hotplug_trigger,
				  u32 dig_hotplug_reg,
				  const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (!HAS_GMCH_DISPLAY(dev_priv)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		schedule_work(&dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return;
}

static irqreturn_t cherryview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;


		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

}
(pch_iir & SDE_AUDIO_HDCP_MASK) 1882 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1883 1884 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1885 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1886 1887 if (pch_iir & SDE_POISON) 1888 DRM_ERROR("PCH poison interrupt\n"); 1889 1890 if (pch_iir & SDE_FDI_MASK) 1891 for_each_pipe(dev_priv, pipe) 1892 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1893 pipe_name(pipe), 1894 I915_READ(FDI_RX_IIR(pipe))); 1895 1896 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1897 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1898 1899 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1900 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1901 1902 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1903 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1904 1905 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1906 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1907 } 1908 1909 static void ivb_err_int_handler(struct drm_device *dev) 1910 { 1911 struct drm_i915_private *dev_priv = dev->dev_private; 1912 u32 err_int = I915_READ(GEN7_ERR_INT); 1913 enum i915_pipe pipe; 1914 1915 if (err_int & ERR_INT_POISON) 1916 DRM_ERROR("Poison interrupt\n"); 1917 1918 for_each_pipe(dev_priv, pipe) { 1919 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1920 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1921 1922 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1923 if (IS_IVYBRIDGE(dev)) 1924 ivb_pipe_crc_irq_handler(dev, pipe); 1925 else 1926 hsw_pipe_crc_irq_handler(dev, pipe); 1927 } 1928 } 1929 1930 I915_WRITE(GEN7_ERR_INT, err_int); 1931 } 1932 1933 static void cpt_serr_int_handler(struct drm_device *dev) 1934 { 1935 struct drm_i915_private *dev_priv = dev->dev_private; 1936 u32 serr_int = I915_READ(SERR_INT); 1937 1938 if (serr_int & SERR_INT_POISON) 1939 DRM_ERROR("PCH poison interrupt\n"); 1940 1941 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1942 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1943 1944 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1945 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1946 1947 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1948 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 1949 1950 I915_WRITE(SERR_INT, serr_int); 1951 } 1952 1953 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1954 { 1955 struct drm_i915_private *dev_priv = dev->dev_private; 1956 int pipe; 1957 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1958 u32 dig_hotplug_reg; 1959 1960 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1961 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1962 1963 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 1964 1965 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1966 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1967 SDE_AUDIO_POWER_SHIFT_CPT); 1968 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1969 port_name(port)); 1970 } 1971 1972 if (pch_iir & SDE_AUX_MASK_CPT) 1973 dp_aux_irq_handler(dev); 1974 1975 if (pch_iir & SDE_GMBUS_CPT) 1976 gmbus_irq_handler(dev); 1977 1978 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1979 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1980 1981 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1982 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1983 1984 if (pch_iir & SDE_FDI_MASK_CPT) 1985 for_each_pipe(dev_priv, pipe) 1986 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1987 pipe_name(pipe), 1988 I915_READ(FDI_RX_IIR(pipe))); 1989 1990 if (pch_iir & SDE_ERROR_CPT) 1991 
cpt_serr_int_handler(dev); 1992 } 1993 1994 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1995 { 1996 struct drm_i915_private *dev_priv = dev->dev_private; 1997 enum i915_pipe pipe; 1998 1999 if (de_iir & DE_AUX_CHANNEL_A) 2000 dp_aux_irq_handler(dev); 2001 2002 if (de_iir & DE_GSE) 2003 intel_opregion_asle_intr(dev); 2004 2005 if (de_iir & DE_POISON) 2006 DRM_ERROR("Poison interrupt\n"); 2007 2008 for_each_pipe(dev_priv, pipe) { 2009 if (de_iir & DE_PIPE_VBLANK(pipe) && 2010 intel_pipe_handle_vblank(dev, pipe)) 2011 intel_check_page_flip(dev, pipe); 2012 2013 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2014 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2015 2016 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2017 i9xx_pipe_crc_irq_handler(dev, pipe); 2018 2019 /* plane/pipes map 1:1 on ilk+ */ 2020 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2021 intel_prepare_page_flip(dev, pipe); 2022 intel_finish_page_flip_plane(dev, pipe); 2023 } 2024 } 2025 2026 /* check event from PCH */ 2027 if (de_iir & DE_PCH_EVENT) { 2028 u32 pch_iir = I915_READ(SDEIIR); 2029 2030 if (HAS_PCH_CPT(dev)) 2031 cpt_irq_handler(dev, pch_iir); 2032 else 2033 ibx_irq_handler(dev, pch_iir); 2034 2035 /* should clear PCH hotplug event before clear CPU irq */ 2036 I915_WRITE(SDEIIR, pch_iir); 2037 } 2038 2039 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2040 ironlake_rps_change_irq_handler(dev); 2041 } 2042 2043 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2044 { 2045 struct drm_i915_private *dev_priv = dev->dev_private; 2046 enum i915_pipe pipe; 2047 2048 if (de_iir & DE_ERR_INT_IVB) 2049 ivb_err_int_handler(dev); 2050 2051 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2052 dp_aux_irq_handler(dev); 2053 2054 if (de_iir & DE_GSE_IVB) 2055 intel_opregion_asle_intr(dev); 2056 2057 for_each_pipe(dev_priv, pipe) { 2058 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2059 intel_pipe_handle_vblank(dev, pipe)) 2060 intel_check_page_flip(dev, pipe); 2061 2062 /* plane/pipes map 1:1 on ilk+ */ 2063 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2064 intel_prepare_page_flip(dev, pipe); 2065 intel_finish_page_flip_plane(dev, pipe); 2066 } 2067 } 2068 2069 /* check event from PCH */ 2070 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2071 u32 pch_iir = I915_READ(SDEIIR); 2072 2073 cpt_irq_handler(dev, pch_iir); 2074 2075 /* clear PCH hotplug event before clear CPU irq */ 2076 I915_WRITE(SDEIIR, pch_iir); 2077 } 2078 } 2079 2080 /* 2081 * To handle irqs with the minimum potential races with fresh interrupts, we: 2082 * 1 - Disable Master Interrupt Control. 2083 * 2 - Find the source(s) of the interrupt. 2084 * 3 - Clear the Interrupt Identity bits (IIR). 2085 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2086 * 5 - Re-enable Master Interrupt Control. 2087 */ 2088 static irqreturn_t ironlake_irq_handler(void *arg) 2089 { 2090 struct drm_device *dev = arg; 2091 struct drm_i915_private *dev_priv = dev->dev_private; 2092 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2093 2094 if (!intel_irqs_enabled(dev_priv)) 2095 return IRQ_NONE; 2096 2097 /* We get interrupts on unclaimed registers, so check for this before we 2098 * do any I915_{READ,WRITE}. */ 2099 intel_uncore_check_errors(dev); 2100 2101 /* disable master interrupt before clearing iir */ 2102 de_ier = I915_READ(DEIER); 2103 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2104 POSTING_READ(DEIER); 2105 2106 /* Disable south interrupts. 
 * We'll only write to SDEIIR once, so further interrupts will be
 * stored on its back queue, and then we'll be able to process them
 * after we restore SDEIER (as soon as we restore it, we'll get an
 * interrupt if SDEIIR still has something to process due to its
 * back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return IRQ_HANDLED;
}

static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t hp_control;
	uint32_t hp_trigger;

	/* Get the status */
	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
	hp_control = I915_READ(BXT_HOTPLUG_CTL);

	/* Hotplug not enabled? */
	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
		DRM_ERROR("Interrupt when HPD disabled\n");
		return;
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			 hp_control & BXT_HOTPLUG_CTL_MASK);

	/* Check for HPD storm and schedule bottom half */
	intel_hpd_irq_handler(dev, hp_trigger, hp_control, hpd_bxt);

	/*
	 * FIXME: Save the hot plug status for bottom half before
	 * clearing the sticky status bits, else the status will be
	 * lost.
2180 */ 2181 2182 /* Clear sticky bits in hpd status */ 2183 I915_WRITE(BXT_HOTPLUG_CTL, hp_control); 2184 } 2185 2186 static irqreturn_t gen8_irq_handler(void *arg) 2187 { 2188 struct drm_device *dev = arg; 2189 struct drm_i915_private *dev_priv = dev->dev_private; 2190 u32 master_ctl; 2191 uint32_t tmp = 0; 2192 enum i915_pipe pipe; 2193 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2194 2195 if (!intel_irqs_enabled(dev_priv)) 2196 return IRQ_NONE; 2197 2198 if (IS_GEN9(dev)) 2199 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2200 GEN9_AUX_CHANNEL_D; 2201 2202 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2203 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2204 if (!master_ctl) 2205 return IRQ_NONE; 2206 2207 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2208 2209 /* Find, clear, then process each source of interrupt */ 2210 2211 gen8_gt_irq_handler(dev_priv, master_ctl); 2212 2213 if (master_ctl & GEN8_DE_MISC_IRQ) { 2214 tmp = I915_READ(GEN8_DE_MISC_IIR); 2215 if (tmp) { 2216 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2217 if (tmp & GEN8_DE_MISC_GSE) 2218 intel_opregion_asle_intr(dev); 2219 else 2220 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2221 } 2222 else 2223 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2224 } 2225 2226 if (master_ctl & GEN8_DE_PORT_IRQ) { 2227 tmp = I915_READ(GEN8_DE_PORT_IIR); 2228 if (tmp) { 2229 bool found = false; 2230 2231 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2232 2233 if (tmp & aux_mask) { 2234 dp_aux_irq_handler(dev); 2235 found = true; 2236 } 2237 2238 if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) { 2239 bxt_hpd_handler(dev, tmp); 2240 found = true; 2241 } 2242 2243 if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) { 2244 gmbus_irq_handler(dev); 2245 found = true; 2246 } 2247 2248 if (!found) 2249 DRM_ERROR("Unexpected DE Port interrupt\n"); 2250 } 2251 else 2252 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2253 } 2254 2255 for_each_pipe(dev_priv, pipe) { 2256 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2257 2258 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2259 continue; 2260 2261 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2262 if (pipe_iir) { 2263 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2264 2265 if (pipe_iir & GEN8_PIPE_VBLANK && 2266 intel_pipe_handle_vblank(dev, pipe)) 2267 intel_check_page_flip(dev, pipe); 2268 2269 if (IS_GEN9(dev)) 2270 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2271 else 2272 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2273 2274 if (flip_done) { 2275 intel_prepare_page_flip(dev, pipe); 2276 intel_finish_page_flip_plane(dev, pipe); 2277 } 2278 2279 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2280 hsw_pipe_crc_irq_handler(dev, pipe); 2281 2282 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2283 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2284 pipe); 2285 2286 2287 if (IS_GEN9(dev)) 2288 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2289 else 2290 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2291 2292 if (fault_errors) 2293 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2294 pipe_name(pipe), 2295 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2296 } else 2297 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2298 } 2299 2300 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2301 master_ctl & GEN8_DE_PCH_IRQ) { 2302 /* 2303 * FIXME(BDW): Assume for now that the new interrupt handling 2304 * scheme also closed the SDE interrupt handling race we've seen 2305 * on older pch-split platforms. But this needs testing. 
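 *
 * The reasoning: GEN8_MASTER_IRQ was cleared at the top of this
 * handler, so nothing new can be signalled to the CPU between our
 * read of SDEIIR and the write-back that acks it; older platforms
 * instead had to zero SDEIER around the whole sequence, as
 * ironlake_irq_handler() does above.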
2306 */ 2307 u32 pch_iir = I915_READ(SDEIIR); 2308 if (pch_iir) { 2309 I915_WRITE(SDEIIR, pch_iir); 2310 cpt_irq_handler(dev, pch_iir); 2311 } else 2312 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2313 2314 } 2315 2316 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2317 POSTING_READ_FW(GEN8_MASTER_IRQ); 2318 2319 } 2320 2321 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2322 bool reset_completed) 2323 { 2324 struct intel_engine_cs *ring; 2325 int i; 2326 2327 /* 2328 * Notify all waiters for GPU completion events that reset state has 2329 * been changed, and that they need to restart their wait after 2330 * checking for potential errors (and bail out to drop locks if there is 2331 * a gpu reset pending so that i915_error_work_func can acquire them). 2332 */ 2333 2334 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2335 for_each_ring(ring, dev_priv, i) 2336 wake_up_all(&ring->irq_queue); 2337 2338 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2339 wake_up_all(&dev_priv->pending_flip_queue); 2340 2341 /* 2342 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2343 * reset state is cleared. 2344 */ 2345 if (reset_completed) 2346 wake_up_all(&dev_priv->gpu_error.reset_queue); 2347 } 2348 2349 /** 2350 * i915_reset_and_wakeup - do process context error handling work 2351 * 2352 * Fire an error uevent so userspace can see that a hang or error 2353 * was detected. 2354 */ 2355 static void i915_reset_and_wakeup(struct drm_device *dev) 2356 { 2357 struct drm_i915_private *dev_priv = to_i915(dev); 2358 struct i915_gpu_error *error = &dev_priv->gpu_error; 2359 #if 0 2360 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2361 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2362 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2363 #endif 2364 int ret; 2365 2366 #if 0 2367 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2368 #endif 2369 2370 /* 2371 * Note that there's only one work item which does gpu resets, so we 2372 * need not worry about concurrent gpu resets potentially incrementing 2373 * error->reset_counter twice. We only need to take care of another 2374 * racing irq/hangcheck declaring the gpu dead for a second time. A 2375 * quick check for that is good enough: schedule_work ensures the 2376 * correct ordering between hang detection and this work item, and since 2377 * the reset in-progress bit is only ever set by code outside of this 2378 * work we don't need to worry about any other races. 2379 */ 2380 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2381 DRM_DEBUG_DRIVER("resetting chip\n"); 2382 #if 0 2383 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2384 reset_event); 2385 #endif 2386 2387 /* 2388 * In most cases it's guaranteed that we get here with an RPM 2389 * reference held, for example because there is a pending GPU 2390 * request that won't finish until the reset is done. This 2391 * isn't the case at least when we get here by doing a 2392 * simulated reset via debugs, so get an RPM reference. 2393 */ 2394 intel_runtime_pm_get(dev_priv); 2395 2396 intel_prepare_reset(dev); 2397 2398 /* 2399 * All state reset _must_ be completed before we update the 2400 * reset counter, for otherwise waiters might miss the reset 2401 * pending state and not properly drop locks, resulting in 2402 * deadlocks with the reset work. 
2403 */ 2404 ret = i915_reset(dev); 2405 2406 intel_finish_reset(dev); 2407 2408 intel_runtime_pm_put(dev_priv); 2409 2410 if (ret == 0) { 2411 /* 2412 * After all the gem state is reset, increment the reset 2413 * counter and wake up everyone waiting for the reset to 2414 * complete. 2415 * 2416 * Since unlock operations are a one-sided barrier only, 2417 * we need to insert a barrier here to order any seqno 2418 * updates before 2419 * the counter increment. 2420 */ 2421 smp_mb__before_atomic(); 2422 atomic_inc(&dev_priv->gpu_error.reset_counter); 2423 2424 #if 0 2425 kobject_uevent_env(&dev->primary->kdev->kobj, 2426 KOBJ_CHANGE, reset_done_event); 2427 #endif 2428 } else { 2429 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2430 } 2431 2432 /* 2433 * Note: The wake_up also serves as a memory barrier so that 2434 * waiters see the update value of the reset counter atomic_t. 2435 */ 2436 i915_error_wake_up(dev_priv, true); 2437 } 2438 } 2439 2440 static void i915_report_and_clear_eir(struct drm_device *dev) 2441 { 2442 struct drm_i915_private *dev_priv = dev->dev_private; 2443 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2444 u32 eir = I915_READ(EIR); 2445 int pipe, i; 2446 2447 if (!eir) 2448 return; 2449 2450 pr_err("render error detected, EIR: 0x%08x\n", eir); 2451 2452 #if 0 2453 i915_get_extra_instdone(dev, instdone); 2454 #endif 2455 2456 if (IS_G4X(dev)) { 2457 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2458 u32 ipeir = I915_READ(IPEIR_I965); 2459 2460 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2461 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2462 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2463 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2464 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2465 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2466 I915_WRITE(IPEIR_I965, ipeir); 2467 POSTING_READ(IPEIR_I965); 2468 } 2469 if (eir & GM45_ERROR_PAGE_TABLE) { 2470 u32 pgtbl_err = I915_READ(PGTBL_ER); 2471 pr_err("page table error\n"); 2472 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2473 I915_WRITE(PGTBL_ER, pgtbl_err); 2474 POSTING_READ(PGTBL_ER); 2475 } 2476 } 2477 2478 if (!IS_GEN2(dev)) { 2479 if (eir & I915_ERROR_PAGE_TABLE) { 2480 u32 pgtbl_err = I915_READ(PGTBL_ER); 2481 pr_err("page table error\n"); 2482 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2483 I915_WRITE(PGTBL_ER, pgtbl_err); 2484 POSTING_READ(PGTBL_ER); 2485 } 2486 } 2487 2488 if (eir & I915_ERROR_MEMORY_REFRESH) { 2489 pr_err("memory refresh error:\n"); 2490 for_each_pipe(dev_priv, pipe) 2491 pr_err("pipe %c stat: 0x%08x\n", 2492 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2493 /* pipestat has already been acked */ 2494 } 2495 if (eir & I915_ERROR_INSTRUCTION) { 2496 pr_err("instruction error\n"); 2497 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2498 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2499 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2500 if (INTEL_INFO(dev)->gen < 4) { 2501 u32 ipeir = I915_READ(IPEIR); 2502 2503 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2504 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2505 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2506 I915_WRITE(IPEIR, ipeir); 2507 POSTING_READ(IPEIR); 2508 } else { 2509 u32 ipeir = I915_READ(IPEIR_I965); 2510 2511 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2512 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2513 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2514 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2515 I915_WRITE(IPEIR_I965, ipeir); 2516 POSTING_READ(IPEIR_I965); 2517 } 
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
#endif
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wake up waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the
		 * woken processes will see a reset in progress, back off
		 * and release their locks, and then wait for the reset to
		 * complete. We must do this for _all_ gpu waiters that
		 * might hold locks that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ?
DE_PIPE_VBLANK_IVB(pipe) : 2605 DE_PIPE_VBLANK(pipe); 2606 2607 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2608 ironlake_enable_display_irq(dev_priv, bit); 2609 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2610 2611 return 0; 2612 } 2613 2614 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2615 { 2616 struct drm_i915_private *dev_priv = dev->dev_private; 2617 2618 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2619 i915_enable_pipestat(dev_priv, pipe, 2620 PIPE_START_VBLANK_INTERRUPT_STATUS); 2621 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2622 2623 return 0; 2624 } 2625 2626 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2627 { 2628 struct drm_i915_private *dev_priv = dev->dev_private; 2629 2630 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2631 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2632 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2633 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2634 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2635 return 0; 2636 } 2637 2638 /* Called from drm generic code, passed 'crtc' which 2639 * we use as a pipe index 2640 */ 2641 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2642 { 2643 struct drm_i915_private *dev_priv = dev->dev_private; 2644 2645 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2646 i915_disable_pipestat(dev_priv, pipe, 2647 PIPE_VBLANK_INTERRUPT_STATUS | 2648 PIPE_START_VBLANK_INTERRUPT_STATUS); 2649 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2650 } 2651 2652 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2653 { 2654 struct drm_i915_private *dev_priv = dev->dev_private; 2655 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2656 DE_PIPE_VBLANK(pipe); 2657 2658 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2659 ironlake_disable_display_irq(dev_priv, bit); 2660 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2661 } 2662 2663 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2664 { 2665 struct drm_i915_private *dev_priv = dev->dev_private; 2666 2667 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2668 i915_disable_pipestat(dev_priv, pipe, 2669 PIPE_START_VBLANK_INTERRUPT_STATUS); 2670 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2671 } 2672 2673 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2674 { 2675 struct drm_i915_private *dev_priv = dev->dev_private; 2676 2677 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2678 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2679 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2680 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2681 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2682 } 2683 2684 static bool 2685 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2686 { 2687 return (list_empty(&ring->request_list) || 2688 i915_seqno_passed(seqno, ring->last_submitted_seqno)); 2689 } 2690 2691 static bool 2692 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2693 { 2694 if (INTEL_INFO(dev)->gen >= 8) { 2695 return (ipehr >> 23) == 0x1c; 2696 } else { 2697 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2698 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2699 MI_SEMAPHORE_REGISTER); 2700 } 2701 } 2702 2703 static struct intel_engine_cs * 2704 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2705 { 2706 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2707 struct intel_engine_cs *signaller; 2708 int i; 2709 2710 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2711 for_each_ring(signaller, dev_priv, i) { 2712 if (ring == 
signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		/* 64-bit semaphore address: high dword, then the low dword */
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
ring->hangcheck.max_acthd = acthd; 2833 return HANGCHECK_ACTIVE; 2834 } 2835 2836 return HANGCHECK_ACTIVE_LOOP; 2837 } 2838 2839 if (IS_GEN2(dev)) 2840 return HANGCHECK_HUNG; 2841 2842 /* Is the chip hanging on a WAIT_FOR_EVENT? 2843 * If so we can simply poke the RB_WAIT bit 2844 * and break the hang. This should work on 2845 * all but the second generation chipsets. 2846 */ 2847 tmp = I915_READ_CTL(ring); 2848 if (tmp & RING_WAIT) { 2849 i915_handle_error(dev, false, 2850 "Kicking stuck wait on %s", 2851 ring->name); 2852 I915_WRITE_CTL(ring, tmp); 2853 return HANGCHECK_KICK; 2854 } 2855 2856 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2857 switch (semaphore_passed(ring)) { 2858 default: 2859 return HANGCHECK_HUNG; 2860 case 1: 2861 i915_handle_error(dev, false, 2862 "Kicking stuck semaphore on %s", 2863 ring->name); 2864 I915_WRITE_CTL(ring, tmp); 2865 return HANGCHECK_KICK; 2866 case 0: 2867 return HANGCHECK_WAIT; 2868 } 2869 } 2870 2871 return HANGCHECK_HUNG; 2872 } 2873 2874 /* 2875 * This is called when the chip hasn't reported back with completed 2876 * batchbuffers in a long time. We keep track per ring seqno progress and 2877 * if there are no progress, hangcheck score for that ring is increased. 2878 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2879 * we kick the ring. If we see no progress on three subsequent calls 2880 * we assume chip is wedged and try to fix it by resetting the chip. 2881 */ 2882 static void i915_hangcheck_elapsed(struct work_struct *work) 2883 { 2884 struct drm_i915_private *dev_priv = 2885 container_of(work, typeof(*dev_priv), 2886 gpu_error.hangcheck_work.work); 2887 struct drm_device *dev = dev_priv->dev; 2888 struct intel_engine_cs *ring; 2889 int i; 2890 int busy_count = 0, rings_hung = 0; 2891 bool stuck[I915_NUM_RINGS] = { 0 }; 2892 #define BUSY 1 2893 #define KICK 5 2894 #define HUNG 20 2895 2896 if (!i915.enable_hangcheck) 2897 return; 2898 2899 for_each_ring(ring, dev_priv, i) { 2900 u64 acthd; 2901 u32 seqno; 2902 bool busy = true; 2903 2904 semaphore_clear_deadlocks(dev_priv); 2905 2906 seqno = ring->get_seqno(ring, false); 2907 acthd = intel_ring_get_active_head(ring); 2908 2909 if (ring->hangcheck.seqno == seqno) { 2910 if (ring_idle(ring, seqno)) { 2911 ring->hangcheck.action = HANGCHECK_IDLE; 2912 2913 if (waitqueue_active(&ring->irq_queue)) { 2914 /* Issue a wake-up to catch stuck h/w. */ 2915 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2916 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2917 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2918 ring->name); 2919 else 2920 DRM_INFO("Fake missed irq on %s\n", 2921 ring->name); 2922 wake_up_all(&ring->irq_queue); 2923 } 2924 /* Safeguard against driver failure */ 2925 ring->hangcheck.score += BUSY; 2926 } else 2927 busy = false; 2928 } else { 2929 /* We always increment the hangcheck score 2930 * if the ring is busy and still processing 2931 * the same request, so that no single request 2932 * can run indefinitely (such as a chain of 2933 * batches). The only time we do not increment 2934 * the hangcheck score on this ring, if this 2935 * ring is in a legitimate wait for another 2936 * ring. In that case the waiting ring is a 2937 * victim and we want to be sure we catch the 2938 * right culprit. 
Then every time we do kick 2939 * the ring, add a small increment to the 2940 * score so that we can catch a batch that is 2941 * being repeatedly kicked and so responsible 2942 * for stalling the machine. 2943 */ 2944 ring->hangcheck.action = ring_stuck(ring, 2945 acthd); 2946 2947 switch (ring->hangcheck.action) { 2948 case HANGCHECK_IDLE: 2949 case HANGCHECK_WAIT: 2950 case HANGCHECK_ACTIVE: 2951 break; 2952 case HANGCHECK_ACTIVE_LOOP: 2953 ring->hangcheck.score += BUSY; 2954 break; 2955 case HANGCHECK_KICK: 2956 ring->hangcheck.score += KICK; 2957 break; 2958 case HANGCHECK_HUNG: 2959 ring->hangcheck.score += HUNG; 2960 stuck[i] = true; 2961 break; 2962 } 2963 } 2964 } else { 2965 ring->hangcheck.action = HANGCHECK_ACTIVE; 2966 2967 /* Gradually reduce the count so that we catch DoS 2968 * attempts across multiple batches. 2969 */ 2970 if (ring->hangcheck.score > 0) 2971 ring->hangcheck.score--; 2972 2973 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 2974 } 2975 2976 ring->hangcheck.seqno = seqno; 2977 ring->hangcheck.acthd = acthd; 2978 busy_count += busy; 2979 } 2980 2981 for_each_ring(ring, dev_priv, i) { 2982 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2983 DRM_INFO("%s on %s\n", 2984 stuck[i] ? "stuck" : "no progress", 2985 ring->name); 2986 rings_hung++; 2987 } 2988 } 2989 2990 if (rings_hung) 2991 return i915_handle_error(dev, true, "Ring hung"); 2992 2993 if (busy_count) 2994 /* Reset timer case chip hangs without another request 2995 * being added */ 2996 i915_queue_hangcheck(dev); 2997 } 2998 2999 void i915_queue_hangcheck(struct drm_device *dev) 3000 { 3001 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3002 3003 if (!i915.enable_hangcheck) 3004 return; 3005 3006 /* Don't continually defer the hangcheck so that it is always run at 3007 * least once after work has been scheduled on any ring. Otherwise, 3008 * we will ignore a hung ring if a second ring is kept busy. 3009 */ 3010 3011 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 3012 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 3013 } 3014 3015 static void ibx_irq_reset(struct drm_device *dev) 3016 { 3017 struct drm_i915_private *dev_priv = dev->dev_private; 3018 3019 if (HAS_PCH_NOP(dev)) 3020 return; 3021 3022 GEN5_IRQ_RESET(SDE); 3023 3024 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3025 I915_WRITE(SERR_INT, 0xffffffff); 3026 } 3027 3028 /* 3029 * SDEIER is also touched by the interrupt handler to work around missed PCH 3030 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3031 * instead we unconditionally enable all PCH interrupt sources here, but then 3032 * only unmask them as needed with SDEIMR. 3033 * 3034 * This function needs to be called before interrupts are enabled. 
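 *
 * The enable register thus becomes effectively write-once, and all
 * runtime gating goes through the mask register instead. A sketch of
 * the resulting discipline (hypothetical helper, for illustration
 * only):
 */
#if 0
static void sde_unmask(struct drm_i915_private *dev_priv, u32 bits)
{
	/* SDEIER stays 0xffffffff; only the mask register ever changes */
	I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bits);
	POSTING_READ(SDEIMR);
}
#endif
/*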
3035 */ 3036 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3037 { 3038 struct drm_i915_private *dev_priv = dev->dev_private; 3039 3040 if (HAS_PCH_NOP(dev)) 3041 return; 3042 3043 WARN_ON(I915_READ(SDEIER) != 0); 3044 I915_WRITE(SDEIER, 0xffffffff); 3045 POSTING_READ(SDEIER); 3046 } 3047 3048 static void gen5_gt_irq_reset(struct drm_device *dev) 3049 { 3050 struct drm_i915_private *dev_priv = dev->dev_private; 3051 3052 GEN5_IRQ_RESET(GT); 3053 if (INTEL_INFO(dev)->gen >= 6) 3054 GEN5_IRQ_RESET(GEN6_PM); 3055 } 3056 3057 /* drm_dma.h hooks 3058 */ 3059 static void ironlake_irq_reset(struct drm_device *dev) 3060 { 3061 struct drm_i915_private *dev_priv = dev->dev_private; 3062 3063 I915_WRITE(HWSTAM, 0xffffffff); 3064 3065 GEN5_IRQ_RESET(DE); 3066 if (IS_GEN7(dev)) 3067 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3068 3069 gen5_gt_irq_reset(dev); 3070 3071 ibx_irq_reset(dev); 3072 } 3073 3074 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3075 { 3076 enum i915_pipe pipe; 3077 3078 I915_WRITE(PORT_HOTPLUG_EN, 0); 3079 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3080 3081 for_each_pipe(dev_priv, pipe) 3082 I915_WRITE(PIPESTAT(pipe), 0xffff); 3083 3084 GEN5_IRQ_RESET(VLV_); 3085 } 3086 3087 static void valleyview_irq_preinstall(struct drm_device *dev) 3088 { 3089 struct drm_i915_private *dev_priv = dev->dev_private; 3090 3091 /* VLV magic */ 3092 I915_WRITE(VLV_IMR, 0); 3093 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3094 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3095 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3096 3097 gen5_gt_irq_reset(dev); 3098 3099 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3100 3101 vlv_display_irq_reset(dev_priv); 3102 } 3103 3104 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3105 { 3106 GEN8_IRQ_RESET_NDX(GT, 0); 3107 GEN8_IRQ_RESET_NDX(GT, 1); 3108 GEN8_IRQ_RESET_NDX(GT, 2); 3109 GEN8_IRQ_RESET_NDX(GT, 3); 3110 } 3111 3112 static void gen8_irq_reset(struct drm_device *dev) 3113 { 3114 struct drm_i915_private *dev_priv = dev->dev_private; 3115 int pipe; 3116 3117 I915_WRITE(GEN8_MASTER_IRQ, 0); 3118 POSTING_READ(GEN8_MASTER_IRQ); 3119 3120 gen8_gt_irq_reset(dev_priv); 3121 3122 for_each_pipe(dev_priv, pipe) 3123 if (intel_display_power_is_enabled(dev_priv, 3124 POWER_DOMAIN_PIPE(pipe))) 3125 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3126 3127 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3128 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3129 GEN5_IRQ_RESET(GEN8_PCU_); 3130 3131 if (HAS_PCH_SPLIT(dev)) 3132 ibx_irq_reset(dev); 3133 } 3134 3135 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3136 unsigned int pipe_mask) 3137 { 3138 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3139 3140 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3141 if (pipe_mask & 1 << PIPE_A) 3142 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 3143 dev_priv->de_irq_mask[PIPE_A], 3144 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 3145 if (pipe_mask & 1 << PIPE_B) 3146 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 3147 dev_priv->de_irq_mask[PIPE_B], 3148 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3149 if (pipe_mask & 1 << PIPE_C) 3150 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 3151 dev_priv->de_irq_mask[PIPE_C], 3152 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3153 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3154 } 3155 3156 static void cherryview_irq_preinstall(struct drm_device *dev) 3157 { 3158 struct drm_i915_private *dev_priv = dev->dev_private; 3159 3160 I915_WRITE(GEN8_MASTER_IRQ, 0); 3161 POSTING_READ(GEN8_MASTER_IRQ); 3162 3163 
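	/*
	 * Same ordering as gen8_irq_reset() above: disable the master
	 * interrupt and flush that write before tearing down any per-unit
	 * IMR/IER/IIR state, so a half-reset unit can never raise a
	 * top-level interrupt.
	 */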
gen8_gt_irq_reset(dev_priv); 3164 3165 GEN5_IRQ_RESET(GEN8_PCU_); 3166 3167 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3168 3169 vlv_display_irq_reset(dev_priv); 3170 } 3171 3172 static void ibx_hpd_irq_setup(struct drm_device *dev) 3173 { 3174 struct drm_i915_private *dev_priv = dev->dev_private; 3175 struct intel_encoder *intel_encoder; 3176 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3177 3178 if (HAS_PCH_IBX(dev)) { 3179 hotplug_irqs = SDE_HOTPLUG_MASK; 3180 for_each_intel_encoder(dev, intel_encoder) 3181 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3182 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3183 } else { 3184 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3185 for_each_intel_encoder(dev, intel_encoder) 3186 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3187 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3188 } 3189 3190 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3191 3192 /* 3193 * Enable digital hotplug on the PCH, and configure the DP short pulse 3194 * duration to 2ms (which is the minimum in the Display Port spec) 3195 * 3196 * This register is the same on all known PCH chips. 3197 */ 3198 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3199 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3200 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3201 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3202 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3203 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3204 } 3205 3206 static void bxt_hpd_irq_setup(struct drm_device *dev) 3207 { 3208 struct drm_i915_private *dev_priv = dev->dev_private; 3209 struct intel_encoder *intel_encoder; 3210 u32 hotplug_port = 0; 3211 u32 hotplug_ctrl; 3212 3213 /* Now, enable HPD */ 3214 for_each_intel_encoder(dev, intel_encoder) { 3215 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark 3216 == HPD_ENABLED) 3217 hotplug_port |= hpd_bxt[intel_encoder->hpd_pin]; 3218 } 3219 3220 /* Mask all HPD control bits */ 3221 hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK; 3222 3223 /* Enable requested port in hotplug control */ 3224 /* TODO: implement (short) HPD support on port A */ 3225 WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA); 3226 if (hotplug_port & BXT_DE_PORT_HP_DDIB) 3227 hotplug_ctrl |= BXT_DDIB_HPD_ENABLE; 3228 if (hotplug_port & BXT_DE_PORT_HP_DDIC) 3229 hotplug_ctrl |= BXT_DDIC_HPD_ENABLE; 3230 I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl); 3231 3232 /* Unmask DDI hotplug in IMR */ 3233 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port; 3234 I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl); 3235 3236 /* Enable DDI hotplug in IER */ 3237 hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port; 3238 I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl); 3239 POSTING_READ(GEN8_DE_PORT_IER); 3240 } 3241 3242 static void ibx_irq_postinstall(struct drm_device *dev) 3243 { 3244 struct drm_i915_private *dev_priv = dev->dev_private; 3245 u32 mask; 3246 3247 if (HAS_PCH_NOP(dev)) 3248 return; 3249 3250 if (HAS_PCH_IBX(dev)) 3251 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3252 else 3253 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3254 3255 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3256 I915_WRITE(SDEIMR, ~mask); 3257 } 3258 3259 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3260 { 3261 struct drm_i915_private *dev_priv = dev->dev_private; 3262 u32 pm_irqs, gt_irqs; 3263 3264 pm_irqs = gt_irqs = 0; 3265 3266 dev_priv->gt_irq_mask = ~0; 3267 if 
(HAS_L3_DPF(dev)) { 3268 /* L3 parity interrupt is always unmasked. */ 3269 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3270 gt_irqs |= GT_PARITY_ERROR(dev); 3271 } 3272 3273 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3274 if (IS_GEN5(dev)) { 3275 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3276 ILK_BSD_USER_INTERRUPT; 3277 } else { 3278 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3279 } 3280 3281 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3282 3283 if (INTEL_INFO(dev)->gen >= 6) { 3284 /* 3285 * RPS interrupts will get enabled/disabled on demand when RPS 3286 * itself is enabled/disabled. 3287 */ 3288 if (HAS_VEBOX(dev)) 3289 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3290 3291 dev_priv->pm_irq_mask = 0xffffffff; 3292 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3293 } 3294 } 3295 3296 static int ironlake_irq_postinstall(struct drm_device *dev) 3297 { 3298 struct drm_i915_private *dev_priv = dev->dev_private; 3299 u32 display_mask, extra_mask; 3300 3301 if (INTEL_INFO(dev)->gen >= 7) { 3302 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3303 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3304 DE_PLANEB_FLIP_DONE_IVB | 3305 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3306 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3307 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3308 } else { 3309 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3310 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3311 DE_AUX_CHANNEL_A | 3312 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3313 DE_POISON); 3314 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3315 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3316 } 3317 3318 dev_priv->irq_mask = ~display_mask; 3319 3320 I915_WRITE(HWSTAM, 0xeffe); 3321 3322 ibx_irq_pre_postinstall(dev); 3323 3324 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3325 3326 gen5_gt_irq_postinstall(dev); 3327 3328 ibx_irq_postinstall(dev); 3329 3330 if (IS_IRONLAKE_M(dev)) { 3331 /* Enable PCU event interrupts 3332 * 3333 * spinlocking not required here for correctness since interrupt 3334 * setup is guaranteed to run in single-threaded context. But we 3335 * need it to make the assert_spin_locked happy. 
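 * (ironlake_enable_display_irq() asserts that irq_lock is held, so the
 * lock is taken here even though no other context can race with us;
 * keeping the assertion unconditional catches real misuse elsewhere.)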
*/ 3336 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3337 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3338 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3339 } 3340 3341 return 0; 3342 } 3343 3344 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3345 { 3346 u32 pipestat_mask; 3347 u32 iir_mask; 3348 enum i915_pipe pipe; 3349 3350 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3351 PIPE_FIFO_UNDERRUN_STATUS; 3352 3353 for_each_pipe(dev_priv, pipe) 3354 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3355 POSTING_READ(PIPESTAT(PIPE_A)); 3356 3357 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3358 PIPE_CRC_DONE_INTERRUPT_STATUS; 3359 3360 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3361 for_each_pipe(dev_priv, pipe) 3362 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3363 3364 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3365 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3366 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3367 if (IS_CHERRYVIEW(dev_priv)) 3368 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3369 dev_priv->irq_mask &= ~iir_mask; 3370 3371 I915_WRITE(VLV_IIR, iir_mask); 3372 I915_WRITE(VLV_IIR, iir_mask); 3373 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3374 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3375 POSTING_READ(VLV_IMR); 3376 } 3377 3378 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3379 { 3380 u32 pipestat_mask; 3381 u32 iir_mask; 3382 enum i915_pipe pipe; 3383 3384 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3385 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3386 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3387 if (IS_CHERRYVIEW(dev_priv)) 3388 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3389 3390 dev_priv->irq_mask |= iir_mask; 3391 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3392 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3393 I915_WRITE(VLV_IIR, iir_mask); 3394 I915_WRITE(VLV_IIR, iir_mask); 3395 POSTING_READ(VLV_IIR); 3396 3397 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3398 PIPE_CRC_DONE_INTERRUPT_STATUS; 3399 3400 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3401 for_each_pipe(dev_priv, pipe) 3402 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3403 3404 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3405 PIPE_FIFO_UNDERRUN_STATUS; 3406 3407 for_each_pipe(dev_priv, pipe) 3408 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3409 POSTING_READ(PIPESTAT(PIPE_A)); 3410 } 3411 3412 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3413 { 3414 assert_spin_locked(&dev_priv->irq_lock); 3415 3416 if (dev_priv->display_irqs_enabled) 3417 return; 3418 3419 dev_priv->display_irqs_enabled = true; 3420 3421 if (intel_irqs_enabled(dev_priv)) 3422 valleyview_display_irqs_install(dev_priv); 3423 } 3424 3425 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3426 { 3427 assert_spin_locked(&dev_priv->irq_lock); 3428 3429 if (!dev_priv->display_irqs_enabled) 3430 return; 3431 3432 dev_priv->display_irqs_enabled = false; 3433 3434 if (intel_irqs_enabled(dev_priv)) 3435 valleyview_display_irqs_uninstall(dev_priv); 3436 } 3437 3438 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3439 { 3440 dev_priv->irq_mask = ~0; 3441 3442 I915_WRITE(PORT_HOTPLUG_EN, 0); 3443 POSTING_READ(PORT_HOTPLUG_EN); 3444 3445 I915_WRITE(VLV_IIR, 0xffffffff); 3446 I915_WRITE(VLV_IIR, 0xffffffff); 3447 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3448 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3449 POSTING_READ(VLV_IMR); 3450 3451 /* Interrupt setup is already guaranteed 
to be single-threaded, this is 3452 * just to make the assert_spin_locked check happy. */ 3453 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3454 if (dev_priv->display_irqs_enabled) 3455 valleyview_display_irqs_install(dev_priv); 3456 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3457 } 3458 3459 static int valleyview_irq_postinstall(struct drm_device *dev) 3460 { 3461 struct drm_i915_private *dev_priv = dev->dev_private; 3462 3463 vlv_display_irq_postinstall(dev_priv); 3464 3465 gen5_gt_irq_postinstall(dev); 3466 3467 /* ack & enable invalid PTE error interrupts */ 3468 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3469 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3470 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3471 #endif 3472 3473 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3474 3475 return 0; 3476 } 3477 3478 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3479 { 3480 /* These are interrupts we'll toggle with the ring mask register */ 3481 uint32_t gt_interrupts[] = { 3482 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3483 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3484 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3485 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3486 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3487 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3488 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3489 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3490 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3491 0, 3492 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3493 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3494 }; 3495 3496 dev_priv->pm_irq_mask = 0xffffffff; 3497 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3498 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3499 /* 3500 * RPS interrupts will get enabled/disabled on demand when RPS itself 3501 * is enabled/disabled. 
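 *
 * GT index 2 therefore starts fully masked (IMR all ones) with IER
 * zero; the RPS code later flips only its own bits, roughly like
 * this (a sketch, not the driver's exact enable path):
 */
#if 0
	dev_priv->pm_irq_mask &= ~dev_priv->pm_rps_events;
	I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
	I915_WRITE(GEN8_GT_IER(2), dev_priv->pm_rps_events);
#endif
/*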
3502 */ 3503 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3504 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3505 } 3506 3507 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3508 { 3509 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3510 uint32_t de_pipe_enables; 3511 int pipe; 3512 u32 de_port_en = GEN8_AUX_CHANNEL_A; 3513 3514 if (IS_GEN9(dev_priv)) { 3515 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3516 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3517 de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3518 GEN9_AUX_CHANNEL_D; 3519 3520 if (IS_BROXTON(dev_priv)) 3521 de_port_en |= BXT_DE_PORT_GMBUS; 3522 } else 3523 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3524 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3525 3526 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3527 GEN8_PIPE_FIFO_UNDERRUN; 3528 3529 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3530 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3531 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3532 3533 for_each_pipe(dev_priv, pipe) 3534 if (intel_display_power_is_enabled(dev_priv, 3535 POWER_DOMAIN_PIPE(pipe))) 3536 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3537 dev_priv->de_irq_mask[pipe], 3538 de_pipe_enables); 3539 3540 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en); 3541 } 3542 3543 static int gen8_irq_postinstall(struct drm_device *dev) 3544 { 3545 struct drm_i915_private *dev_priv = dev->dev_private; 3546 3547 if (HAS_PCH_SPLIT(dev)) 3548 ibx_irq_pre_postinstall(dev); 3549 3550 gen8_gt_irq_postinstall(dev_priv); 3551 gen8_de_irq_postinstall(dev_priv); 3552 3553 if (HAS_PCH_SPLIT(dev)) 3554 ibx_irq_postinstall(dev); 3555 3556 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3557 POSTING_READ(GEN8_MASTER_IRQ); 3558 3559 return 0; 3560 } 3561 3562 static int cherryview_irq_postinstall(struct drm_device *dev) 3563 { 3564 struct drm_i915_private *dev_priv = dev->dev_private; 3565 3566 vlv_display_irq_postinstall(dev_priv); 3567 3568 gen8_gt_irq_postinstall(dev_priv); 3569 3570 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3571 POSTING_READ(GEN8_MASTER_IRQ); 3572 3573 return 0; 3574 } 3575 3576 static void gen8_irq_uninstall(struct drm_device *dev) 3577 { 3578 struct drm_i915_private *dev_priv = dev->dev_private; 3579 3580 if (!dev_priv) 3581 return; 3582 3583 gen8_irq_reset(dev); 3584 } 3585 3586 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3587 { 3588 /* Interrupt setup is already guaranteed to be single-threaded, this is 3589 * just to make the assert_spin_locked check happy. 
*/ 3590 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3591 if (dev_priv->display_irqs_enabled) 3592 valleyview_display_irqs_uninstall(dev_priv); 3593 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3594 3595 vlv_display_irq_reset(dev_priv); 3596 3597 dev_priv->irq_mask = ~0; 3598 } 3599 3600 static void valleyview_irq_uninstall(struct drm_device *dev) 3601 { 3602 struct drm_i915_private *dev_priv = dev->dev_private; 3603 3604 if (!dev_priv) 3605 return; 3606 3607 I915_WRITE(VLV_MASTER_IER, 0); 3608 3609 gen5_gt_irq_reset(dev); 3610 3611 I915_WRITE(HWSTAM, 0xffffffff); 3612 3613 vlv_display_irq_uninstall(dev_priv); 3614 } 3615 3616 static void cherryview_irq_uninstall(struct drm_device *dev) 3617 { 3618 struct drm_i915_private *dev_priv = dev->dev_private; 3619 3620 if (!dev_priv) 3621 return; 3622 3623 I915_WRITE(GEN8_MASTER_IRQ, 0); 3624 POSTING_READ(GEN8_MASTER_IRQ); 3625 3626 gen8_gt_irq_reset(dev_priv); 3627 3628 GEN5_IRQ_RESET(GEN8_PCU_); 3629 3630 vlv_display_irq_uninstall(dev_priv); 3631 } 3632 3633 static void ironlake_irq_uninstall(struct drm_device *dev) 3634 { 3635 struct drm_i915_private *dev_priv = dev->dev_private; 3636 3637 if (!dev_priv) 3638 return; 3639 3640 ironlake_irq_reset(dev); 3641 } 3642 3643 static void i8xx_irq_preinstall(struct drm_device * dev) 3644 { 3645 struct drm_i915_private *dev_priv = dev->dev_private; 3646 int pipe; 3647 3648 for_each_pipe(dev_priv, pipe) 3649 I915_WRITE(PIPESTAT(pipe), 0); 3650 I915_WRITE16(IMR, 0xffff); 3651 I915_WRITE16(IER, 0x0); 3652 POSTING_READ16(IER); 3653 } 3654 3655 static int i8xx_irq_postinstall(struct drm_device *dev) 3656 { 3657 struct drm_i915_private *dev_priv = dev->dev_private; 3658 3659 I915_WRITE16(EMR, 3660 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3661 3662 /* Unmask the interrupts that we always want on. */ 3663 dev_priv->irq_mask = 3664 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3665 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3666 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3667 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3668 I915_WRITE16(IMR, dev_priv->irq_mask); 3669 3670 I915_WRITE16(IER, 3671 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3672 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3673 I915_USER_INTERRUPT); 3674 POSTING_READ16(IER); 3675 3676 /* Interrupt setup is already guaranteed to be single-threaded, this is 3677 * just to make the assert_spin_locked check happy. */ 3678 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3679 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3680 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3681 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3682 3683 return 0; 3684 } 3685 3686 /* 3687 * Returns true when a page flip has completed. 3688 */ 3689 static bool i8xx_handle_vblank(struct drm_device *dev, 3690 int plane, int pipe, u32 iir) 3691 { 3692 struct drm_i915_private *dev_priv = dev->dev_private; 3693 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3694 3695 if (!intel_pipe_handle_vblank(dev, pipe)) 3696 return false; 3697 3698 if ((iir & flip_pending) == 0) 3699 goto check_page_flip; 3700 3701 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3702 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3703 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3704 * the flip is completed (no longer pending). Since this doesn't raise 3705 * an interrupt per se, we watch for the change at vblank. 
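 *
 * In short: IIR latches the 0->1 edge of PendingFlip, while ISR shows
 * its live value. So at vblank time the two combine as follows:
 *
 *   IIR set, ISR set   -> flip armed but still pending, keep waiting
 *   IIR set, ISR clear -> PendingFlip dropped, the flip has completed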
3706 */ 3707 if (I915_READ16(ISR) & flip_pending) 3708 goto check_page_flip; 3709 3710 intel_prepare_page_flip(dev, plane); 3711 intel_finish_page_flip(dev, pipe); 3712 return true; 3713 3714 check_page_flip: 3715 intel_check_page_flip(dev, pipe); 3716 return false; 3717 } 3718 3719 static irqreturn_t i8xx_irq_handler(void *arg) 3720 { 3721 struct drm_device *dev = arg; 3722 struct drm_i915_private *dev_priv = dev->dev_private; 3723 u16 iir, new_iir; 3724 u32 pipe_stats[2]; 3725 int pipe; 3726 u16 flip_mask = 3727 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3728 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3729 3730 if (!intel_irqs_enabled(dev_priv)) 3731 return IRQ_NONE; 3732 3733 iir = I915_READ16(IIR); 3734 if (iir == 0) 3735 return; 3736 3737 while (iir & ~flip_mask) { 3738 /* Can't rely on pipestat interrupt bit in iir as it might 3739 * have been cleared after the pipestat interrupt was received. 3740 * It doesn't set the bit in iir again, but it still produces 3741 * interrupts (for non-MSI). 3742 */ 3743 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3744 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3745 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3746 3747 for_each_pipe(dev_priv, pipe) { 3748 int reg = PIPESTAT(pipe); 3749 pipe_stats[pipe] = I915_READ(reg); 3750 3751 /* 3752 * Clear the PIPE*STAT regs before the IIR 3753 */ 3754 if (pipe_stats[pipe] & 0x8000ffff) 3755 I915_WRITE(reg, pipe_stats[pipe]); 3756 } 3757 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3758 3759 I915_WRITE16(IIR, iir & ~flip_mask); 3760 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3761 3762 if (iir & I915_USER_INTERRUPT) 3763 notify_ring(&dev_priv->ring[RCS]); 3764 3765 for_each_pipe(dev_priv, pipe) { 3766 int plane = pipe; 3767 if (HAS_FBC(dev)) 3768 plane = !plane; 3769 3770 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3771 i8xx_handle_vblank(dev, plane, pipe, iir)) 3772 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3773 3774 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3775 i9xx_pipe_crc_irq_handler(dev, pipe); 3776 3777 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3778 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3779 pipe); 3780 } 3781 3782 iir = new_iir; 3783 } 3784 3785 } 3786 3787 static void i8xx_irq_uninstall(struct drm_device * dev) 3788 { 3789 struct drm_i915_private *dev_priv = dev->dev_private; 3790 int pipe; 3791 3792 for_each_pipe(dev_priv, pipe) { 3793 /* Clear enable bits; then clear status bits */ 3794 I915_WRITE(PIPESTAT(pipe), 0); 3795 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3796 } 3797 I915_WRITE16(IMR, 0xffff); 3798 I915_WRITE16(IER, 0x0); 3799 I915_WRITE16(IIR, I915_READ16(IIR)); 3800 } 3801 3802 static void i915_irq_preinstall(struct drm_device * dev) 3803 { 3804 struct drm_i915_private *dev_priv = dev->dev_private; 3805 int pipe; 3806 3807 if (I915_HAS_HOTPLUG(dev)) { 3808 I915_WRITE(PORT_HOTPLUG_EN, 0); 3809 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3810 } 3811 3812 I915_WRITE16(HWSTAM, 0xeffe); 3813 for_each_pipe(dev_priv, pipe) 3814 I915_WRITE(PIPESTAT(pipe), 0); 3815 I915_WRITE(IMR, 0xffffffff); 3816 I915_WRITE(IER, 0x0); 3817 POSTING_READ(IER); 3818 } 3819 3820 static int i915_irq_postinstall(struct drm_device *dev) 3821 { 3822 struct drm_i915_private *dev_priv = dev->dev_private; 3823 u32 enable_mask; 3824 3825 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3826 3827 /* Unmask the interrupts that we always want on. 
*/ 3828 dev_priv->irq_mask = 3829 ~(I915_ASLE_INTERRUPT | 3830 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3831 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3832 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3833 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3834 3835 enable_mask = 3836 I915_ASLE_INTERRUPT | 3837 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3838 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3839 I915_USER_INTERRUPT; 3840 3841 if (I915_HAS_HOTPLUG(dev)) { 3842 I915_WRITE(PORT_HOTPLUG_EN, 0); 3843 POSTING_READ(PORT_HOTPLUG_EN); 3844 3845 /* Enable in IER... */ 3846 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3847 /* and unmask in IMR */ 3848 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3849 } 3850 3851 I915_WRITE(IMR, dev_priv->irq_mask); 3852 I915_WRITE(IER, enable_mask); 3853 POSTING_READ(IER); 3854 3855 i915_enable_asle_pipestat(dev); 3856 3857 /* Interrupt setup is already guaranteed to be single-threaded, this is 3858 * just to make the assert_spin_locked check happy. */ 3859 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3860 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3861 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3862 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3863 3864 return 0; 3865 } 3866 3867 /* 3868 * Returns true when a page flip has completed. 3869 */ 3870 static bool i915_handle_vblank(struct drm_device *dev, 3871 int plane, int pipe, u32 iir) 3872 { 3873 struct drm_i915_private *dev_priv = dev->dev_private; 3874 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3875 3876 if (!intel_pipe_handle_vblank(dev, pipe)) 3877 return false; 3878 3879 if ((iir & flip_pending) == 0) 3880 goto check_page_flip; 3881 3882 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3883 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3884 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3885 * the flip is completed (no longer pending). Since this doesn't raise 3886 * an interrupt per se, we watch for the change at vblank. 3887 */ 3888 if (I915_READ(ISR) & flip_pending) 3889 goto check_page_flip; 3890 3891 intel_prepare_page_flip(dev, plane); 3892 intel_finish_page_flip(dev, pipe); 3893 return true; 3894 3895 check_page_flip: 3896 intel_check_page_flip(dev, pipe); 3897 return false; 3898 } 3899 3900 static irqreturn_t i915_irq_handler(void *arg) 3901 { 3902 struct drm_device *dev = arg; 3903 struct drm_i915_private *dev_priv = dev->dev_private; 3904 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3905 u32 flip_mask = 3906 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3907 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3908 int pipe; 3909 3910 if (!intel_irqs_enabled(dev_priv)) 3911 return IRQ_NONE; 3912 3913 iir = I915_READ(IIR); 3914 do { 3915 bool irq_received = (iir & ~flip_mask) != 0; 3916 bool blc_event = false; 3917 3918 /* Can't rely on pipestat interrupt bit in iir as it might 3919 * have been cleared after the pipestat interrupt was received. 3920 * It doesn't set the bit in iir again, but it still produces 3921 * interrupts (for non-MSI). 
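		 * That is also why the loop below treats a latched PIPESTAT
		 * bit as its own "irq received" signal instead of trusting
		 * the summary bit in iir alone.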
3922 */ 3923 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3924 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3925 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3926 3927 for_each_pipe(dev_priv, pipe) { 3928 int reg = PIPESTAT(pipe); 3929 pipe_stats[pipe] = I915_READ(reg); 3930 3931 /* Clear the PIPE*STAT regs before the IIR */ 3932 if (pipe_stats[pipe] & 0x8000ffff) { 3933 I915_WRITE(reg, pipe_stats[pipe]); 3934 irq_received = true; 3935 } 3936 } 3937 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3938 3939 if (!irq_received) 3940 break; 3941 3942 /* Consume port. Then clear IIR or we'll miss events */ 3943 if (I915_HAS_HOTPLUG(dev) && 3944 iir & I915_DISPLAY_PORT_INTERRUPT) 3945 i9xx_hpd_irq_handler(dev); 3946 3947 I915_WRITE(IIR, iir & ~flip_mask); 3948 new_iir = I915_READ(IIR); /* Flush posted writes */ 3949 3950 if (iir & I915_USER_INTERRUPT) 3951 notify_ring(&dev_priv->ring[RCS]); 3952 3953 for_each_pipe(dev_priv, pipe) { 3954 int plane = pipe; 3955 if (HAS_FBC(dev)) 3956 plane = !plane; 3957 3958 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3959 i915_handle_vblank(dev, plane, pipe, iir)) 3960 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3961 3962 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3963 blc_event = true; 3964 3965 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3966 i9xx_pipe_crc_irq_handler(dev, pipe); 3967 3968 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3969 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3970 pipe); 3971 } 3972 3973 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3974 intel_opregion_asle_intr(dev); 3975 3976 /* With MSI, interrupts are only generated when iir 3977 * transitions from zero to nonzero. If another bit got 3978 * set while we were handling the existing iir bits, then 3979 * we would never get another interrupt. 3980 * 3981 * This is fine on non-MSI as well, as if we hit this path 3982 * we avoid exiting the interrupt handler only to generate 3983 * another one. 3984 * 3985 * Note that for MSI this could cause a stray interrupt report 3986 * if an interrupt landed in the time between writing IIR and 3987 * the posting read. This should be rare enough to never 3988 * trigger the 99% of 100,000 interrupts test for disabling 3989 * stray interrupts. 
3990 */ 3991 iir = new_iir; 3992 } while (iir & ~flip_mask); 3993 3994 } 3995 3996 static void i915_irq_uninstall(struct drm_device * dev) 3997 { 3998 struct drm_i915_private *dev_priv = dev->dev_private; 3999 int pipe; 4000 4001 if (I915_HAS_HOTPLUG(dev)) { 4002 I915_WRITE(PORT_HOTPLUG_EN, 0); 4003 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4004 } 4005 4006 I915_WRITE16(HWSTAM, 0xffff); 4007 for_each_pipe(dev_priv, pipe) { 4008 /* Clear enable bits; then clear status bits */ 4009 I915_WRITE(PIPESTAT(pipe), 0); 4010 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4011 } 4012 I915_WRITE(IMR, 0xffffffff); 4013 I915_WRITE(IER, 0x0); 4014 4015 I915_WRITE(IIR, I915_READ(IIR)); 4016 } 4017 4018 static void i965_irq_preinstall(struct drm_device * dev) 4019 { 4020 struct drm_i915_private *dev_priv = dev->dev_private; 4021 int pipe; 4022 4023 I915_WRITE(PORT_HOTPLUG_EN, 0); 4024 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4025 4026 I915_WRITE(HWSTAM, 0xeffe); 4027 for_each_pipe(dev_priv, pipe) 4028 I915_WRITE(PIPESTAT(pipe), 0); 4029 I915_WRITE(IMR, 0xffffffff); 4030 I915_WRITE(IER, 0x0); 4031 POSTING_READ(IER); 4032 } 4033 4034 static int i965_irq_postinstall(struct drm_device *dev) 4035 { 4036 struct drm_i915_private *dev_priv = dev->dev_private; 4037 u32 enable_mask; 4038 u32 error_mask; 4039 4040 /* Unmask the interrupts that we always want on. */ 4041 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4042 I915_DISPLAY_PORT_INTERRUPT | 4043 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4044 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4045 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4046 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4047 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4048 4049 enable_mask = ~dev_priv->irq_mask; 4050 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4051 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4052 enable_mask |= I915_USER_INTERRUPT; 4053 4054 if (IS_G4X(dev)) 4055 enable_mask |= I915_BSD_USER_INTERRUPT; 4056 4057 /* Interrupt setup is already guaranteed to be single-threaded, this is 4058 * just to make the assert_spin_locked check happy. */ 4059 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4060 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4061 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4062 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4063 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4064 4065 /* 4066 * Enable some error detection, note the instruction error mask 4067 * bit is reserved, so we leave it masked. 
4068 */ 4069 if (IS_G4X(dev)) { 4070 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4071 GM45_ERROR_MEM_PRIV | 4072 GM45_ERROR_CP_PRIV | 4073 I915_ERROR_MEMORY_REFRESH); 4074 } else { 4075 error_mask = ~(I915_ERROR_PAGE_TABLE | 4076 I915_ERROR_MEMORY_REFRESH); 4077 } 4078 I915_WRITE(EMR, error_mask); 4079 4080 I915_WRITE(IMR, dev_priv->irq_mask); 4081 I915_WRITE(IER, enable_mask); 4082 POSTING_READ(IER); 4083 4084 I915_WRITE(PORT_HOTPLUG_EN, 0); 4085 POSTING_READ(PORT_HOTPLUG_EN); 4086 4087 i915_enable_asle_pipestat(dev); 4088 4089 return 0; 4090 } 4091 4092 static void i915_hpd_irq_setup(struct drm_device *dev) 4093 { 4094 struct drm_i915_private *dev_priv = dev->dev_private; 4095 struct intel_encoder *intel_encoder; 4096 u32 hotplug_en; 4097 4098 assert_spin_locked(&dev_priv->irq_lock); 4099 4100 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4101 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4102 /* Note HDMI and DP share hotplug bits */ 4103 /* enable bits are the same for all generations */ 4104 for_each_intel_encoder(dev, intel_encoder) 4105 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4106 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4107 /* Programming the CRT detection parameters tends 4108 to generate a spurious hotplug event about three 4109 seconds later. So just do it once. 4110 */ 4111 if (IS_G4X(dev)) 4112 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4113 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4114 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4115 4116 /* Ignore TV since it's buggy */ 4117 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4118 } 4119 4120 static irqreturn_t i965_irq_handler(void *arg) 4121 { 4122 struct drm_device *dev = arg; 4123 struct drm_i915_private *dev_priv = dev->dev_private; 4124 u32 iir, new_iir; 4125 u32 pipe_stats[I915_MAX_PIPES]; 4126 int pipe; 4127 u32 flip_mask = 4128 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4129 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4130 4131 if (!intel_irqs_enabled(dev_priv)) 4132 return IRQ_NONE; 4133 4134 iir = I915_READ(IIR); 4135 4136 for (;;) { 4137 bool irq_received = (iir & ~flip_mask) != 0; 4138 bool blc_event = false; 4139 4140 /* Can't rely on pipestat interrupt bit in iir as it might 4141 * have been cleared after the pipestat interrupt was received. 4142 * It doesn't set the bit in iir again, but it still produces 4143 * interrupts (for non-MSI). 4144 */ 4145 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4146 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4147 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4148 4149 for_each_pipe(dev_priv, pipe) { 4150 int reg = PIPESTAT(pipe); 4151 pipe_stats[pipe] = I915_READ(reg); 4152 4153 /* 4154 * Clear the PIPE*STAT regs before the IIR 4155 */ 4156 if (pipe_stats[pipe] & 0x8000ffff) { 4157 I915_WRITE(reg, pipe_stats[pipe]); 4158 irq_received = true; 4159 } 4160 } 4161 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4162 4163 if (!irq_received) 4164 break; 4165 4166 /* Consume port. 
Then clear IIR or we'll miss events */ 4167 if (iir & I915_DISPLAY_PORT_INTERRUPT) 4168 i9xx_hpd_irq_handler(dev); 4169 4170 I915_WRITE(IIR, iir & ~flip_mask); 4171 new_iir = I915_READ(IIR); /* Flush posted writes */ 4172 4173 if (iir & I915_USER_INTERRUPT) 4174 notify_ring(&dev_priv->ring[RCS]); 4175 if (iir & I915_BSD_USER_INTERRUPT) 4176 notify_ring(&dev_priv->ring[VCS]); 4177 4178 for_each_pipe(dev_priv, pipe) { 4179 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4180 i915_handle_vblank(dev, pipe, pipe, iir)) 4181 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4182 4183 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4184 blc_event = true; 4185 4186 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4187 i9xx_pipe_crc_irq_handler(dev, pipe); 4188 4189 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4190 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4191 } 4192 4193 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4194 intel_opregion_asle_intr(dev); 4195 4196 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4197 gmbus_irq_handler(dev); 4198 4199 /* With MSI, interrupts are only generated when iir 4200 * transitions from zero to nonzero. If another bit got 4201 * set while we were handling the existing iir bits, then 4202 * we would never get another interrupt. 4203 * 4204 * This is fine on non-MSI as well, as if we hit this path 4205 * we avoid exiting the interrupt handler only to generate 4206 * another one. 4207 * 4208 * Note that for MSI this could cause a stray interrupt report 4209 * if an interrupt landed in the time between writing IIR and 4210 * the posting read. This should be rare enough to never 4211 * trigger the 99% of 100,000 interrupts test for disabling 4212 * stray interrupts. 4213 */ 4214 iir = new_iir; 4215 } 4216 4217 } 4218 4219 static void i965_irq_uninstall(struct drm_device * dev) 4220 { 4221 struct drm_i915_private *dev_priv = dev->dev_private; 4222 int pipe; 4223 4224 if (!dev_priv) 4225 return; 4226 4227 I915_WRITE(PORT_HOTPLUG_EN, 0); 4228 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4229 4230 I915_WRITE(HWSTAM, 0xffffffff); 4231 for_each_pipe(dev_priv, pipe) 4232 I915_WRITE(PIPESTAT(pipe), 0); 4233 I915_WRITE(IMR, 0xffffffff); 4234 I915_WRITE(IER, 0x0); 4235 4236 for_each_pipe(dev_priv, pipe) 4237 I915_WRITE(PIPESTAT(pipe), 4238 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4239 I915_WRITE(IIR, I915_READ(IIR)); 4240 } 4241 4242 static void intel_hpd_irq_reenable_work(struct work_struct *work) 4243 { 4244 struct drm_i915_private *dev_priv = 4245 container_of(work, typeof(*dev_priv), 4246 hotplug_reenable_work.work); 4247 struct drm_device *dev = dev_priv->dev; 4248 struct drm_mode_config *mode_config = &dev->mode_config; 4249 int i; 4250 4251 intel_runtime_pm_get(dev_priv); 4252 4253 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4254 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4255 struct drm_connector *connector; 4256 4257 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 4258 continue; 4259 4260 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4261 4262 list_for_each_entry(connector, &mode_config->connector_list, head) { 4263 struct intel_connector *intel_connector = to_intel_connector(connector); 4264 4265 if (intel_connector->encoder->hpd_pin == i) { 4266 if (connector->polled != intel_connector->polled) 4267 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 4268 connector->name); 4269 connector->polled = intel_connector->polled; 4270 if (!connector->polled) 4271 connector->polled = DRM_CONNECTOR_POLL_HPD; 
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
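	 *
	 * To recap the selection above: gen2 reports max_vblank_count = 0
	 * (no usable hardware counter), g4x and gen5+ use the full 32 bit
	 * gm45 counter, and everything else is limited to the 24 bit frame
	 * counter.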
4327 */ 4328 if (!IS_GEN2(dev_priv)) 4329 dev->vblank_disable_immediate = true; 4330 4331 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4332 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4333 4334 if (IS_CHERRYVIEW(dev_priv)) { 4335 dev->driver->irq_handler = cherryview_irq_handler; 4336 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4337 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4338 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4339 dev->driver->enable_vblank = valleyview_enable_vblank; 4340 dev->driver->disable_vblank = valleyview_disable_vblank; 4341 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4342 } else if (IS_VALLEYVIEW(dev_priv)) { 4343 dev->driver->irq_handler = valleyview_irq_handler; 4344 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4345 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4346 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4347 dev->driver->enable_vblank = valleyview_enable_vblank; 4348 dev->driver->disable_vblank = valleyview_disable_vblank; 4349 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4350 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4351 dev->driver->irq_handler = gen8_irq_handler; 4352 dev->driver->irq_preinstall = gen8_irq_reset; 4353 dev->driver->irq_postinstall = gen8_irq_postinstall; 4354 dev->driver->irq_uninstall = gen8_irq_uninstall; 4355 dev->driver->enable_vblank = gen8_enable_vblank; 4356 dev->driver->disable_vblank = gen8_disable_vblank; 4357 if (HAS_PCH_SPLIT(dev)) 4358 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4359 else 4360 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4361 } else if (HAS_PCH_SPLIT(dev)) { 4362 dev->driver->irq_handler = ironlake_irq_handler; 4363 dev->driver->irq_preinstall = ironlake_irq_reset; 4364 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4365 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4366 dev->driver->enable_vblank = ironlake_enable_vblank; 4367 dev->driver->disable_vblank = ironlake_disable_vblank; 4368 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4369 } else { 4370 if (INTEL_INFO(dev_priv)->gen == 2) { 4371 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4372 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4373 dev->driver->irq_handler = i8xx_irq_handler; 4374 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4375 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4376 dev->driver->irq_preinstall = i915_irq_preinstall; 4377 dev->driver->irq_postinstall = i915_irq_postinstall; 4378 dev->driver->irq_uninstall = i915_irq_uninstall; 4379 dev->driver->irq_handler = i915_irq_handler; 4380 } else { 4381 dev->driver->irq_preinstall = i965_irq_preinstall; 4382 dev->driver->irq_postinstall = i965_irq_postinstall; 4383 dev->driver->irq_uninstall = i965_irq_uninstall; 4384 dev->driver->irq_handler = i965_irq_handler; 4385 } 4386 if (I915_HAS_HOTPLUG(dev_priv)) 4387 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4388 dev->driver->enable_vblank = i915_enable_vblank; 4389 dev->driver->disable_vblank = i915_disable_vblank; 4390 } 4391 } 4392 4393 /** 4394 * intel_hpd_init - initializes and enables hpd support 4395 * @dev_priv: i915 device instance 4396 * 4397 * This function enables the hotplug support. It requires that interrupts have 4398 * already been enabled with intel_irq_init_hw(). From this point on hotplug and 4399 * poll request can run concurrently to other code, so locking rules must be 4400 * obeyed. 
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
#if 0
	synchronize_irq(dev_priv->dev->irq);
#endif
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
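 *
 * Re-enabling simply replays the driver's irq_preinstall/irq_postinstall
 * sequence, i.e. the interrupt hardware is reprogrammed from scratch rather
 * than restored from any saved state.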
4490 */ 4491 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4492 { 4493 dev_priv->pm.irqs_enabled = true; 4494 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4495 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4496 } 4497