/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
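
/*
 * Note: the hpd_* tables above map each hpd_pin to the matching hotplug
 * enable/status bit for a given platform family (PCH SDE bits for IBX/CPT,
 * PORT_HOTPLUG enable/status bits for GMCH, g4x and vlv). The IRQ handlers
 * index them by pin to translate hardware trigger bits into hpd events.
 */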

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
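
/*
 * Note: the gen6_pm_iir/imr/ier helpers below pick the PM interrupt
 * registers for the current platform: on gen8+ the PM interrupts live in
 * the GT interrupt block (GEN8_GT_*(2)), on gen6-7 in the dedicated
 * GEN6_PM* registers. All RPS interrupt code goes through these helpers.
 */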

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
	 * if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->rps.interrupts_enabled = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	cancel_work_sync(&dev_priv->rps.work);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Wait for pending IRQ handlers to complete (on other CPUs) */
#if 0
	synchronize_irq(dev->irq);
#endif
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
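
/*
 * Note: in the PIPESTAT register the enable bits sit in the high half and
 * the corresponding status bits in the low half, hence the default
 * enable_mask = status_mask << 16 used below. VLV needs a helper because
 * the sprite flip-done and PSR bits don't follow that simple pairing.
 */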

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/        .    \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}
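
/*
 * Note: the scanout position helpers below use raw register reads (no
 * forcewake) and the uncore lock so that the timing-critical sampling in
 * i915_get_crtc_scanoutpos() stays as close together as possible.
 */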

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int position;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	position = __intel_get_crtc_scanline(crtc);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->state->enable) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
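
/*
 * Note: hotplug processing is split in two work items. dig_port_work
 * handles ports that provide a ->hpd_pulse() callback (digital DP ports)
 * and falls back to the legacy hotplug_work when the pulse handler asks
 * for it; hotplug_work does full connector re-detection and storm
 * handling.
 */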

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		dev_priv->hpd_event_bits |= old_bits;
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
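
/*
 * Note: the vlv_c0_* helpers below implement a VLV/CHV workaround: up/down
 * RPS events are derived by comparing render + media C0 residency against
 * the elapsed CZ timestamp instead of trusting the hardware up/down
 * threshold interrupts directly (see vlv_wa_c0_ei()).
 */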

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  VLV_RP_DOWN_EI_THRESHOLD))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 VLV_RP_UP_EI_THRESHOLD))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row will go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
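
/*
 * Note: the interrupt handler below only masks further parity interrupts,
 * records which L3 slice reported the error and queues the work item
 * above, which reads out the error details and re-enables the interrupt
 * when it is done.
 */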

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
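
/*
 * Note: intel_hpd_irq_handler() below fans a hotplug trigger out in two
 * steps: ports with a ->hpd_pulse() callback are routed to dig_port_work
 * (with long vs. short pulse decoded from the per-port bits), everything
 * else goes through the storm-detection counters and, if still enabled,
 * to hotplug_work.
 */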

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		schedule_work(&dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
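
/*
 * Note: valleyview_pipestat_irq_handler() below reads and clears the
 * per-pipe PIPESTAT registers under irq_lock first, and only then acts on
 * the latched bits (vblank, flip done, CRC, underrun, GMBUS), since the
 * status bits are latched even while the corresponding enable is masked.
 */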

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
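
/*
 * Note: on g4x/vlv the DP AUX completion bits share PORT_HOTPLUG_STAT with
 * the hotplug bits, so i9xx_hpd_irq_handler() below dispatches both, and
 * clears the status register before the caller clears IIR.
 */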

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return;
}

static irqreturn_t cherryview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;


		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);
SDE_AUDIO_HDCP_MASK) 1873 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1874 1875 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1876 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1877 1878 if (pch_iir & SDE_POISON) 1879 DRM_ERROR("PCH poison interrupt\n"); 1880 1881 if (pch_iir & SDE_FDI_MASK) 1882 for_each_pipe(dev_priv, pipe) 1883 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1884 pipe_name(pipe), 1885 I915_READ(FDI_RX_IIR(pipe))); 1886 1887 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1888 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1889 1890 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1891 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1892 1893 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1894 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1895 1896 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1897 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1898 } 1899 1900 static void ivb_err_int_handler(struct drm_device *dev) 1901 { 1902 struct drm_i915_private *dev_priv = dev->dev_private; 1903 u32 err_int = I915_READ(GEN7_ERR_INT); 1904 enum i915_pipe pipe; 1905 1906 if (err_int & ERR_INT_POISON) 1907 DRM_ERROR("Poison interrupt\n"); 1908 1909 for_each_pipe(dev_priv, pipe) { 1910 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 1911 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1912 1913 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1914 if (IS_IVYBRIDGE(dev)) 1915 ivb_pipe_crc_irq_handler(dev, pipe); 1916 else 1917 hsw_pipe_crc_irq_handler(dev, pipe); 1918 } 1919 } 1920 1921 I915_WRITE(GEN7_ERR_INT, err_int); 1922 } 1923 1924 static void cpt_serr_int_handler(struct drm_device *dev) 1925 { 1926 struct drm_i915_private *dev_priv = dev->dev_private; 1927 u32 serr_int = I915_READ(SERR_INT); 1928 1929 if (serr_int & SERR_INT_POISON) 1930 DRM_ERROR("PCH poison interrupt\n"); 1931 1932 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1933 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 1934 1935 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1936 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1937 1938 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1939 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 1940 1941 I915_WRITE(SERR_INT, serr_int); 1942 } 1943 1944 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1945 { 1946 struct drm_i915_private *dev_priv = dev->dev_private; 1947 int pipe; 1948 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1949 u32 dig_hotplug_reg; 1950 1951 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1952 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1953 1954 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 1955 1956 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1957 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1958 SDE_AUDIO_POWER_SHIFT_CPT); 1959 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1960 port_name(port)); 1961 } 1962 1963 if (pch_iir & SDE_AUX_MASK_CPT) 1964 dp_aux_irq_handler(dev); 1965 1966 if (pch_iir & SDE_GMBUS_CPT) 1967 gmbus_irq_handler(dev); 1968 1969 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1970 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1971 1972 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1973 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1974 1975 if (pch_iir & SDE_FDI_MASK_CPT) 1976 for_each_pipe(dev_priv, pipe) 1977 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1978 pipe_name(pipe), 1979 I915_READ(FDI_RX_IIR(pipe))); 1980 1981 if (pch_iir & SDE_ERROR_CPT) 1982 cpt_serr_int_handler(dev); 1983 } 
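/*
 * Display engine (DEIIR) service routines for ILK/SNB and IVB/HSW follow:
 * they fan out the AUX, GSE/opregion, poison and per-pipe vblank/CRC/
 * underrun/flip-done bits, and chain into the PCH (SDEIIR) handlers above.
 */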
1984 1985 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1986 { 1987 struct drm_i915_private *dev_priv = dev->dev_private; 1988 enum i915_pipe pipe; 1989 1990 if (de_iir & DE_AUX_CHANNEL_A) 1991 dp_aux_irq_handler(dev); 1992 1993 if (de_iir & DE_GSE) 1994 intel_opregion_asle_intr(dev); 1995 1996 if (de_iir & DE_POISON) 1997 DRM_ERROR("Poison interrupt\n"); 1998 1999 for_each_pipe(dev_priv, pipe) { 2000 if (de_iir & DE_PIPE_VBLANK(pipe) && 2001 intel_pipe_handle_vblank(dev, pipe)) 2002 intel_check_page_flip(dev, pipe); 2003 2004 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2005 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2006 2007 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2008 i9xx_pipe_crc_irq_handler(dev, pipe); 2009 2010 /* plane/pipes map 1:1 on ilk+ */ 2011 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2012 intel_prepare_page_flip(dev, pipe); 2013 intel_finish_page_flip_plane(dev, pipe); 2014 } 2015 } 2016 2017 /* check event from PCH */ 2018 if (de_iir & DE_PCH_EVENT) { 2019 u32 pch_iir = I915_READ(SDEIIR); 2020 2021 if (HAS_PCH_CPT(dev)) 2022 cpt_irq_handler(dev, pch_iir); 2023 else 2024 ibx_irq_handler(dev, pch_iir); 2025 2026 /* should clear PCH hotplug event before clear CPU irq */ 2027 I915_WRITE(SDEIIR, pch_iir); 2028 } 2029 2030 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2031 ironlake_rps_change_irq_handler(dev); 2032 } 2033 2034 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2035 { 2036 struct drm_i915_private *dev_priv = dev->dev_private; 2037 enum i915_pipe pipe; 2038 2039 if (de_iir & DE_ERR_INT_IVB) 2040 ivb_err_int_handler(dev); 2041 2042 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2043 dp_aux_irq_handler(dev); 2044 2045 if (de_iir & DE_GSE_IVB) 2046 intel_opregion_asle_intr(dev); 2047 2048 for_each_pipe(dev_priv, pipe) { 2049 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2050 intel_pipe_handle_vblank(dev, pipe)) 2051 intel_check_page_flip(dev, pipe); 2052 2053 /* plane/pipes map 1:1 on ilk+ */ 2054 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2055 intel_prepare_page_flip(dev, pipe); 2056 intel_finish_page_flip_plane(dev, pipe); 2057 } 2058 } 2059 2060 /* check event from PCH */ 2061 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2062 u32 pch_iir = I915_READ(SDEIIR); 2063 2064 cpt_irq_handler(dev, pch_iir); 2065 2066 /* clear PCH hotplug event before clear CPU irq */ 2067 I915_WRITE(SDEIIR, pch_iir); 2068 } 2069 } 2070 2071 /* 2072 * To handle irqs with the minimum potential races with fresh interrupts, we: 2073 * 1 - Disable Master Interrupt Control. 2074 * 2 - Find the source(s) of the interrupt. 2075 * 3 - Clear the Interrupt Identity bits (IIR). 2076 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2077 * 5 - Re-enable Master Interrupt Control. 2078 */ 2079 static irqreturn_t ironlake_irq_handler(void *arg) 2080 { 2081 struct drm_device *dev = arg; 2082 struct drm_i915_private *dev_priv = dev->dev_private; 2083 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2084 2085 if (!intel_irqs_enabled(dev_priv)) 2086 return IRQ_NONE; 2087 2088 /* We get interrupts on unclaimed registers, so check for this before we 2089 * do any I915_{READ,WRITE}. */ 2090 intel_uncore_check_errors(dev); 2091 2092 /* disable master interrupt before clearing iir */ 2093 de_ier = I915_READ(DEIER); 2094 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2095 POSTING_READ(DEIER); 2096 2097 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2098 * interrupts will be stored on its back queue, and then we'll be 2099 * able to process them after we restore SDEIER (as soon as we restore 2100 * it, we'll get an interrupt if SDEIIR still has something to process 2101 * due to its back queue). */ 2102 if (!HAS_PCH_NOP(dev)) { 2103 sde_ier = I915_READ(SDEIER); 2104 I915_WRITE(SDEIER, 0); 2105 POSTING_READ(SDEIER); 2106 } 2107 2108 /* Find, clear, then process each source of interrupt */ 2109 2110 gt_iir = I915_READ(GTIIR); 2111 if (gt_iir) { 2112 I915_WRITE(GTIIR, gt_iir); 2113 if (INTEL_INFO(dev)->gen >= 6) 2114 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2115 else 2116 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2117 } 2118 2119 de_iir = I915_READ(DEIIR); 2120 if (de_iir) { 2121 I915_WRITE(DEIIR, de_iir); 2122 if (INTEL_INFO(dev)->gen >= 7) 2123 ivb_display_irq_handler(dev, de_iir); 2124 else 2125 ilk_display_irq_handler(dev, de_iir); 2126 } 2127 2128 if (INTEL_INFO(dev)->gen >= 6) { 2129 u32 pm_iir = I915_READ(GEN6_PMIIR); 2130 if (pm_iir) { 2131 I915_WRITE(GEN6_PMIIR, pm_iir); 2132 gen6_rps_irq_handler(dev_priv, pm_iir); 2133 } 2134 } 2135 2136 I915_WRITE(DEIER, de_ier); 2137 POSTING_READ(DEIER); 2138 if (!HAS_PCH_NOP(dev)) { 2139 I915_WRITE(SDEIER, sde_ier); 2140 POSTING_READ(SDEIER); 2141 } 2142 return IRQ_HANDLED; 2143 }
2144 2145 static irqreturn_t gen8_irq_handler(void *arg) 2146 { 2147 struct drm_device *dev = arg; 2148 struct drm_i915_private *dev_priv = dev->dev_private; 2149 u32 master_ctl; 2150 uint32_t tmp = 0; 2151 enum i915_pipe pipe; 2152 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2153 2154 if (!intel_irqs_enabled(dev_priv)) 2155 return IRQ_NONE; 2156 2157 if (IS_GEN9(dev)) 2158 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2159 GEN9_AUX_CHANNEL_D; 2160 2161 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2162 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2163 if (!master_ctl) 2164 return IRQ_NONE; 2165 2166 I915_WRITE(GEN8_MASTER_IRQ, 0); 2167 POSTING_READ(GEN8_MASTER_IRQ); 2168 2169 /* Find, clear, then process each source of interrupt */ 2170 2171 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2172 2173 if (master_ctl & GEN8_DE_MISC_IRQ) { 2174 tmp = I915_READ(GEN8_DE_MISC_IIR); 2175 if (tmp) { 2176 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2177 if (tmp & GEN8_DE_MISC_GSE) 2178 intel_opregion_asle_intr(dev); 2179 else 2180 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2181 } 2182 else 2183 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2184 } 2185 2186 if (master_ctl & GEN8_DE_PORT_IRQ) { 2187 tmp = I915_READ(GEN8_DE_PORT_IIR); 2188 if (tmp) { 2189 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2190 if (tmp & aux_mask) 2191 dp_aux_irq_handler(dev); 2192 else 2193 DRM_ERROR("Unexpected DE Port interrupt\n"); 2194 } 2195 else 2196 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2197 } 2198 2199 for_each_pipe(dev_priv, pipe) { 2200 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2201 2202 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2203 continue; 2204 2205 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2206 if (pipe_iir) { 2207 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2208 2209 if (pipe_iir & GEN8_PIPE_VBLANK && 2210 intel_pipe_handle_vblank(dev, pipe)) 2211 intel_check_page_flip(dev, pipe); 2212 2213 if (IS_GEN9(dev)) 2214 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2215 else 2216 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2217 2218 if (flip_done) { 2219 intel_prepare_page_flip(dev, pipe); 2220 intel_finish_page_flip_plane(dev, pipe); 2221 } 2222 2223 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2224 hsw_pipe_crc_irq_handler(dev, pipe); 2225 2226 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2227 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2228 pipe); 2229 2230 2231 if (IS_GEN9(dev)) 2232 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2233 else 2234 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2235 2236 if (fault_errors) 2237 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2238 pipe_name(pipe), 2239 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2240 } else 2241 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2242 } 2243 2244 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2245 /* 2246 * FIXME(BDW): Assume for now that the new interrupt handling 2247 * scheme also closed the SDE interrupt handling race we've seen 2248 * on older pch-split platforms. But this needs testing. 2249 */ 2250 u32 pch_iir = I915_READ(SDEIIR); 2251 if (pch_iir) { 2252 I915_WRITE(SDEIIR, pch_iir); 2253 cpt_irq_handler(dev, pch_iir); 2254 } else 2255 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2256 2257 } 2258 2259 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2260 POSTING_READ(GEN8_MASTER_IRQ); 2261 return IRQ_HANDLED; 2262 }
2263 2264 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2265 bool reset_completed) 2266 { 2267 struct intel_engine_cs *ring; 2268 int i; 2269 2270 /* 2271 * Notify all waiters for GPU completion events that reset state has 2272 * been changed, and that they need to restart their wait after 2273 * checking for potential errors (and bail out to drop locks if there is 2274 * a gpu reset pending so that i915_reset_and_wakeup can acquire them). 2275 */ 2276 2277 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2278 for_each_ring(ring, dev_priv, i) 2279 wake_up_all(&ring->irq_queue); 2280 2281 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2282 wake_up_all(&dev_priv->pending_flip_queue); 2283 2284 /* 2285 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2286 * reset state is cleared. 2287 */ 2288 if (reset_completed) 2289 wake_up_all(&dev_priv->gpu_error.reset_queue); 2290 } 2291 2292 /** 2293 * i915_reset_and_wakeup - do process context error handling work 2294 * 2295 * Fire an error uevent so userspace can see that a hang or error 2296 * was detected. 2297 */ 2298 static void i915_reset_and_wakeup(struct drm_device *dev) 2299 { 2300 struct drm_i915_private *dev_priv = to_i915(dev); 2301 struct i915_gpu_error *error = &dev_priv->gpu_error; 2302 #if 0 2303 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2304 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2305 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2306 #endif 2307 int ret; 2308 2309 #if 0 2310 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2311 #endif 2312 2313 /* 2314 * Note that there's only one work item which does gpu resets, so we 2315 * need not worry about concurrent gpu resets potentially incrementing 2316 * error->reset_counter twice. We only need to take care of another 2317 * racing irq/hangcheck declaring the gpu dead for a second time. A 2318 * quick check for that is good enough: schedule_work ensures the 2319 * correct ordering between hang detection and this work item, and since 2320 * the reset in-progress bit is only ever set by code outside of this 2321 * work we don't need to worry about any other races.
2322 */ 2323 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2324 DRM_DEBUG_DRIVER("resetting chip\n"); 2325 #if 0 2326 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2327 reset_event); 2328 #endif 2329 2330 /* 2331 * In most cases it's guaranteed that we get here with an RPM 2332 * reference held, for example because there is a pending GPU 2333 * request that won't finish until the reset is done. This 2334 * isn't the case at least when we get here by doing a 2335 * simulated reset via debugfs, so get an RPM reference. 2336 */ 2337 intel_runtime_pm_get(dev_priv); 2338 2339 intel_prepare_reset(dev); 2340 2341 /* 2342 * All state reset _must_ be completed before we update the 2343 * reset counter, for otherwise waiters might miss the reset 2344 * pending state and not properly drop locks, resulting in 2345 * deadlocks with the reset work. 2346 */ 2347 ret = i915_reset(dev); 2348 2349 intel_finish_reset(dev); 2350 2351 intel_runtime_pm_put(dev_priv); 2352 2353 if (ret == 0) { 2354 /* 2355 * After all the gem state is reset, increment the reset 2356 * counter and wake up everyone waiting for the reset to 2357 * complete. 2358 * 2359 * Since unlock operations are a one-sided barrier only, 2360 * we need to insert a barrier here to order any seqno 2361 * updates before 2362 * the counter increment. 2363 */ 2364 smp_mb__before_atomic(); 2365 atomic_inc(&dev_priv->gpu_error.reset_counter); 2366 2367 #if 0 2368 kobject_uevent_env(&dev->primary->kdev->kobj, 2369 KOBJ_CHANGE, reset_done_event); 2370 #endif 2371 } else { 2372 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2373 } 2374 2375 /* 2376 * Note: The wake_up also serves as a memory barrier so that 2377 * waiters see the updated value of the reset counter atomic_t.
2378 */ 2379 i915_error_wake_up(dev_priv, true); 2380 } 2381 } 2382 2383 static void i915_report_and_clear_eir(struct drm_device *dev) 2384 { 2385 struct drm_i915_private *dev_priv = dev->dev_private; 2386 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2387 u32 eir = I915_READ(EIR); 2388 int pipe, i; 2389 2390 if (!eir) 2391 return; 2392 2393 pr_err("render error detected, EIR: 0x%08x\n", eir); 2394 2395 #if 0 2396 i915_get_extra_instdone(dev, instdone); 2397 #endif 2398 2399 if (IS_G4X(dev)) { 2400 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2401 u32 ipeir = I915_READ(IPEIR_I965); 2402 2403 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2404 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2405 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2406 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2407 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2408 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2409 I915_WRITE(IPEIR_I965, ipeir); 2410 POSTING_READ(IPEIR_I965); 2411 } 2412 if (eir & GM45_ERROR_PAGE_TABLE) { 2413 u32 pgtbl_err = I915_READ(PGTBL_ER); 2414 pr_err("page table error\n"); 2415 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2416 I915_WRITE(PGTBL_ER, pgtbl_err); 2417 POSTING_READ(PGTBL_ER); 2418 } 2419 } 2420 2421 if (!IS_GEN2(dev)) { 2422 if (eir & I915_ERROR_PAGE_TABLE) { 2423 u32 pgtbl_err = I915_READ(PGTBL_ER); 2424 pr_err("page table error\n"); 2425 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2426 I915_WRITE(PGTBL_ER, pgtbl_err); 2427 POSTING_READ(PGTBL_ER); 2428 } 2429 } 2430 2431 if (eir & I915_ERROR_MEMORY_REFRESH) { 2432 pr_err("memory refresh error:\n"); 2433 for_each_pipe(dev_priv, pipe) 2434 pr_err("pipe %c stat: 0x%08x\n", 2435 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2436 /* pipestat has already been acked */ 2437 } 2438 if (eir & I915_ERROR_INSTRUCTION) { 2439 pr_err("instruction error\n"); 2440 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2441 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2442 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2443 if (INTEL_INFO(dev)->gen < 4) { 2444 u32 ipeir = I915_READ(IPEIR); 2445 2446 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2447 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2448 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2449 I915_WRITE(IPEIR, ipeir); 2450 POSTING_READ(IPEIR); 2451 } else { 2452 u32 ipeir = I915_READ(IPEIR_I965); 2453 2454 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2455 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2456 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2457 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2458 I915_WRITE(IPEIR_I965, ipeir); 2459 POSTING_READ(IPEIR_I965); 2460 } 2461 } 2462 2463 I915_WRITE(EIR, eir); 2464 POSTING_READ(EIR); 2465 eir = I915_READ(EIR); 2466 if (eir) { 2467 /* 2468 * some errors might have become stuck, 2469 * mask them. 2470 */ 2471 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2472 I915_WRITE(EMR, I915_READ(EMR) | eir); 2473 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2474 } 2475 } 2476 2477 /** 2478 * i915_handle_error - handle a gpu error 2479 * @dev: drm device 2480 * 2481 * Do some basic checking of regsiter state at error time and 2482 * dump it to the syslog. Also call i915_capture_error_state() to make 2483 * sure we get a record and make it available in debugfs. Fire a uevent 2484 * so userspace knows something bad happened (should trigger collection 2485 * of a ring dump etc.). 2486 */ 2487 void i915_handle_error(struct drm_device *dev, bool wedged, 2488 const char *fmt, ...) 
2489 { 2490 struct drm_i915_private *dev_priv = dev->dev_private; 2491 #if 0 2492 va_list args; 2493 char error_msg[80]; 2494 2495 va_start(args, fmt); 2496 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2497 va_end(args); 2498 2499 i915_capture_error_state(dev, wedged, error_msg); 2500 #endif 2501 i915_report_and_clear_eir(dev); 2502 2503 if (wedged) { 2504 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2505 &dev_priv->gpu_error.reset_counter); 2506 2507 /* 2508 * Wakeup waiting processes so that the reset function 2509 * i915_reset_and_wakeup doesn't deadlock trying to grab 2510 * various locks. By bumping the reset counter first, the woken 2511 * processes will see a reset in progress and back off, 2512 * releasing their locks and then wait for the reset completion. 2513 * We must do this for _all_ gpu waiters that might hold locks 2514 * that the reset work needs to acquire. 2515 * 2516 * Note: The wake_up serves as the required memory barrier to 2517 * ensure that the waiters see the updated value of the reset 2518 * counter atomic_t. 2519 */ 2520 i915_error_wake_up(dev_priv, false); 2521 } 2522 2523 i915_reset_and_wakeup(dev); 2524 } 2525 2526 /* Called from drm generic code, passed 'crtc' which 2527 * we use as a pipe index 2528 */ 2529 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2530 { 2531 struct drm_i915_private *dev_priv = dev->dev_private; 2532 2533 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2534 if (INTEL_INFO(dev)->gen >= 4) 2535 i915_enable_pipestat(dev_priv, pipe, 2536 PIPE_START_VBLANK_INTERRUPT_STATUS); 2537 i915_enable_pipestat(dev_priv, pipe, 2538 PIPE_VBLANK_INTERRUPT_STATUS); 2539 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2540 2541 return 0; 2542 } 2543 2544 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2545 { 2546 struct drm_i915_private *dev_priv = dev->dev_private; 2547 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2548 DE_PIPE_VBLANK(pipe); 2549 2550 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2551 ironlake_enable_display_irq(dev_priv, bit); 2552 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2553 2554 return 0; 2555 } 2556 2557 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2558 { 2559 struct drm_i915_private *dev_priv = dev->dev_private; 2560 2561 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2562 i915_enable_pipestat(dev_priv, pipe, 2563 PIPE_START_VBLANK_INTERRUPT_STATUS); 2564 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2565 2566 return 0; 2567 } 2568 2569 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2570 { 2571 struct drm_i915_private *dev_priv = dev->dev_private; 2572 2573 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2574 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2575 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2576 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2577 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2578 return 0; 2579 } 2580 2581 /* Called from drm generic code, passed 'crtc' which 2582 * we use as a pipe index 2583 */ 2584 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2585 { 2586 struct drm_i915_private *dev_priv = dev->dev_private; 2587 2588 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2589 i915_disable_pipestat(dev_priv, pipe, 2590 PIPE_VBLANK_INTERRUPT_STATUS | 2591 PIPE_START_VBLANK_INTERRUPT_STATUS); 2592 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2593 } 2594 2595 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2596 { 2597 struct drm_i915_private *dev_priv = dev->dev_private; 2598 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2599 DE_PIPE_VBLANK(pipe); 2600 2601 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2602 ironlake_disable_display_irq(dev_priv, bit); 2603 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2604 } 2605 2606 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2607 { 2608 struct drm_i915_private *dev_priv = dev->dev_private; 2609 2610 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2611 i915_disable_pipestat(dev_priv, pipe, 2612 PIPE_START_VBLANK_INTERRUPT_STATUS); 2613 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2614 } 2615 2616 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2617 { 2618 struct drm_i915_private *dev_priv = dev->dev_private; 2619 2620 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2621 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2622 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2623 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2624 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2625 } 2626 2627 static struct drm_i915_gem_request * 2628 ring_last_request(struct intel_engine_cs *ring) 2629 { 2630 return list_entry(ring->request_list.prev, 2631 struct drm_i915_gem_request, list); 2632 } 2633 2634 static bool 2635 ring_idle(struct intel_engine_cs *ring) 2636 { 2637 return (list_empty(&ring->request_list) || 2638 i915_gem_request_completed(ring_last_request(ring), false)); 2639 } 2640 2641 static bool 2642 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2643 { 2644 if (INTEL_INFO(dev)->gen >= 8) { 2645 return (ipehr >> 23) == 0x1c; 2646 } else { 2647 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2648 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2649 MI_SEMAPHORE_REGISTER); 2650 } 2651 } 2652 2653 static struct intel_engine_cs * 2654 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2655 { 2656 struct drm_i915_private 
*dev_priv = ring->dev->dev_private; 2657 struct intel_engine_cs *signaller; 2658 int i; 2659 2660 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2661 for_each_ring(signaller, dev_priv, i) { 2662 if (ring == signaller) 2663 continue; 2664 2665 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2666 return signaller; 2667 } 2668 } else { 2669 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2670 2671 for_each_ring(signaller, dev_priv, i) { 2672 if (ring == signaller) 2673 continue; 2674 2675 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2676 return signaller; 2677 } 2678 } 2679 2680 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n", 2681 ring->id, ipehr, offset); 2682 2683 return NULL; 2684 }
2685 2686 static struct intel_engine_cs * 2687 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2688 { 2689 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2690 u32 cmd, ipehr, head; 2691 u64 offset = 0; 2692 int i, backwards; 2693 2694 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2695 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2696 return NULL; 2697 2698 /* 2699 * HEAD is likely pointing to the dword after the actual command, 2700 * so scan backwards until we find the MBOX. But limit it to just 3 2701 * or 4 dwords depending on the semaphore wait command size. 2702 * Note that we don't care about ACTHD here since that might 2703 * point at the batch, and semaphores are always emitted into the 2704 * ringbuffer itself. 2705 */ 2706 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2707 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2708 2709 for (i = backwards; i; --i) { 2710 /* 2711 * Be paranoid and presume the hw has gone off into the wild - 2712 * our ring is smaller than what the hardware (and hence 2713 * HEAD_ADDR) allows. Also handles wrap-around.
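 * (The AND with (size - 1) below assumes the ring buffer size is a power
 * of two, as allocated by this driver.)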
2714 */ 2715 head &= ring->buffer->size - 1; 2716 2717 /* This here seems to blow up */ 2718 cmd = ioread32(ring->buffer->virtual_start + head); 2719 if (cmd == ipehr) 2720 break; 2721 2722 head -= 4; 2723 } 2724 2725 if (!i) 2726 return NULL; 2727 2728 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2729 if (INTEL_INFO(ring->dev)->gen >= 8) { 2730 offset = ioread32(ring->buffer->virtual_start + head + 12); 2731 offset <<= 32; 2732 offset |= ioread32(ring->buffer->virtual_start + head + 8); 2733 } 2734 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2735 }
2736 2737 static int semaphore_passed(struct intel_engine_cs *ring) 2738 { 2739 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2740 struct intel_engine_cs *signaller; 2741 u32 seqno; 2742 2743 ring->hangcheck.deadlock++; 2744 2745 signaller = semaphore_waits_for(ring, &seqno); 2746 if (signaller == NULL) 2747 return -1; 2748 2749 /* Prevent pathological recursion due to driver bugs */ 2750 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2751 return -1; 2752 2753 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2754 return 1; 2755 2756 /* cursory check for an unkickable deadlock */ 2757 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2758 semaphore_passed(signaller) < 0) 2759 return -1; 2760 2761 return 0; 2762 }
2763 2764 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2765 { 2766 struct intel_engine_cs *ring; 2767 int i; 2768 2769 for_each_ring(ring, dev_priv, i) 2770 ring->hangcheck.deadlock = 0; 2771 }
2772 2773 static enum intel_ring_hangcheck_action 2774 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2775 { 2776 struct drm_device *dev = ring->dev; 2777 struct drm_i915_private *dev_priv = dev->dev_private; 2778 u32 tmp; 2779 2780 if (acthd != ring->hangcheck.acthd) { 2781 if (acthd > ring->hangcheck.max_acthd) { 2782 ring->hangcheck.max_acthd = acthd; 2783 return HANGCHECK_ACTIVE; 2784 } 2785 2786 return HANGCHECK_ACTIVE_LOOP; 2787 } 2788 2789 if (IS_GEN2(dev)) 2790 return HANGCHECK_HUNG; 2791 2792 /* Is the chip hanging on a WAIT_FOR_EVENT? 2793 * If so we can simply poke the RB_WAIT bit 2794 * and break the hang. This should work on 2795 * all but the second generation chipsets. 2796 */ 2797 tmp = I915_READ_CTL(ring); 2798 if (tmp & RING_WAIT) { 2799 i915_handle_error(dev, false, 2800 "Kicking stuck wait on %s", 2801 ring->name); 2802 I915_WRITE_CTL(ring, tmp); 2803 return HANGCHECK_KICK; 2804 } 2805 2806 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2807 switch (semaphore_passed(ring)) { 2808 default: 2809 return HANGCHECK_HUNG; 2810 case 1: 2811 i915_handle_error(dev, false, 2812 "Kicking stuck semaphore on %s", 2813 ring->name); 2814 I915_WRITE_CTL(ring, tmp); 2815 return HANGCHECK_KICK; 2816 case 0: 2817 return HANGCHECK_WAIT; 2818 } 2819 } 2820 2821 return HANGCHECK_HUNG; 2822 }
2823 2824 /* 2825 * This is called when the chip hasn't reported back with completed 2826 * batchbuffers in a long time. We keep track of per-ring seqno progress and 2827 * if there is no progress, the hangcheck score for that ring is increased. 2828 * Further, acthd is inspected to see if the ring is stuck. In the stuck case 2829 * we kick the ring. If we see no progress on three subsequent calls 2830 * we assume the chip is wedged and try to fix it by resetting the chip.
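 * Per-check score increments are BUSY (1), KICK (5) and HUNG (20); a ring
 * is reported hung once its score reaches HANGCHECK_SCORE_RING_HUNG.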
2831 */ 2832 static void i915_hangcheck_elapsed(struct work_struct *work) 2833 { 2834 struct drm_i915_private *dev_priv = 2835 container_of(work, typeof(*dev_priv), 2836 gpu_error.hangcheck_work.work); 2837 struct drm_device *dev = dev_priv->dev; 2838 struct intel_engine_cs *ring; 2839 int i; 2840 int busy_count = 0, rings_hung = 0; 2841 bool stuck[I915_NUM_RINGS] = { 0 }; 2842 #define BUSY 1 2843 #define KICK 5 2844 #define HUNG 20 2845 2846 if (!i915.enable_hangcheck) 2847 return; 2848 2849 for_each_ring(ring, dev_priv, i) { 2850 u64 acthd; 2851 u32 seqno; 2852 bool busy = true; 2853 2854 semaphore_clear_deadlocks(dev_priv); 2855 2856 seqno = ring->get_seqno(ring, false); 2857 acthd = intel_ring_get_active_head(ring); 2858 2859 if (ring->hangcheck.seqno == seqno) { 2860 if (ring_idle(ring)) { 2861 ring->hangcheck.action = HANGCHECK_IDLE; 2862 2863 if (waitqueue_active(&ring->irq_queue)) { 2864 /* Issue a wake-up to catch stuck h/w. */ 2865 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2866 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2867 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2868 ring->name); 2869 else 2870 DRM_INFO("Fake missed irq on %s\n", 2871 ring->name); 2872 wake_up_all(&ring->irq_queue); 2873 } 2874 /* Safeguard against driver failure */ 2875 ring->hangcheck.score += BUSY; 2876 } else 2877 busy = false; 2878 } else { 2879 /* We always increment the hangcheck score 2880 * if the ring is busy and still processing 2881 * the same request, so that no single request 2882 * can run indefinitely (such as a chain of 2883 * batches). The only time we do not increment 2884 * the hangcheck score on this ring, if this 2885 * ring is in a legitimate wait for another 2886 * ring. In that case the waiting ring is a 2887 * victim and we want to be sure we catch the 2888 * right culprit. Then every time we do kick 2889 * the ring, add a small increment to the 2890 * score so that we can catch a batch that is 2891 * being repeatedly kicked and so responsible 2892 * for stalling the machine. 2893 */ 2894 ring->hangcheck.action = ring_stuck(ring, 2895 acthd); 2896 2897 switch (ring->hangcheck.action) { 2898 case HANGCHECK_IDLE: 2899 case HANGCHECK_WAIT: 2900 case HANGCHECK_ACTIVE: 2901 break; 2902 case HANGCHECK_ACTIVE_LOOP: 2903 ring->hangcheck.score += BUSY; 2904 break; 2905 case HANGCHECK_KICK: 2906 ring->hangcheck.score += KICK; 2907 break; 2908 case HANGCHECK_HUNG: 2909 ring->hangcheck.score += HUNG; 2910 stuck[i] = true; 2911 break; 2912 } 2913 } 2914 } else { 2915 ring->hangcheck.action = HANGCHECK_ACTIVE; 2916 2917 /* Gradually reduce the count so that we catch DoS 2918 * attempts across multiple batches. 2919 */ 2920 if (ring->hangcheck.score > 0) 2921 ring->hangcheck.score--; 2922 2923 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 2924 } 2925 2926 ring->hangcheck.seqno = seqno; 2927 ring->hangcheck.acthd = acthd; 2928 busy_count += busy; 2929 } 2930 2931 for_each_ring(ring, dev_priv, i) { 2932 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2933 DRM_INFO("%s on %s\n", 2934 stuck[i] ? 
"stuck" : "no progress", 2935 ring->name); 2936 rings_hung++; 2937 } 2938 } 2939 2940 if (rings_hung) 2941 return i915_handle_error(dev, true, "Ring hung"); 2942 2943 if (busy_count) 2944 /* Reset timer case chip hangs without another request 2945 * being added */ 2946 i915_queue_hangcheck(dev); 2947 } 2948 2949 void i915_queue_hangcheck(struct drm_device *dev) 2950 { 2951 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 2952 2953 if (!i915.enable_hangcheck) 2954 return; 2955 2956 /* Don't continually defer the hangcheck so that it is always run at 2957 * least once after work has been scheduled on any ring. Otherwise, 2958 * we will ignore a hung ring if a second ring is kept busy. 2959 */ 2960 2961 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 2962 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 2963 } 2964 2965 static void ibx_irq_reset(struct drm_device *dev) 2966 { 2967 struct drm_i915_private *dev_priv = dev->dev_private; 2968 2969 if (HAS_PCH_NOP(dev)) 2970 return; 2971 2972 GEN5_IRQ_RESET(SDE); 2973 2974 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 2975 I915_WRITE(SERR_INT, 0xffffffff); 2976 } 2977 2978 /* 2979 * SDEIER is also touched by the interrupt handler to work around missed PCH 2980 * interrupts. Hence we can't update it after the interrupt handler is enabled - 2981 * instead we unconditionally enable all PCH interrupt sources here, but then 2982 * only unmask them as needed with SDEIMR. 2983 * 2984 * This function needs to be called before interrupts are enabled. 2985 */ 2986 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2987 { 2988 struct drm_i915_private *dev_priv = dev->dev_private; 2989 2990 if (HAS_PCH_NOP(dev)) 2991 return; 2992 2993 WARN_ON(I915_READ(SDEIER) != 0); 2994 I915_WRITE(SDEIER, 0xffffffff); 2995 POSTING_READ(SDEIER); 2996 } 2997 2998 static void gen5_gt_irq_reset(struct drm_device *dev) 2999 { 3000 struct drm_i915_private *dev_priv = dev->dev_private; 3001 3002 GEN5_IRQ_RESET(GT); 3003 if (INTEL_INFO(dev)->gen >= 6) 3004 GEN5_IRQ_RESET(GEN6_PM); 3005 } 3006 3007 /* drm_dma.h hooks 3008 */ 3009 static void ironlake_irq_reset(struct drm_device *dev) 3010 { 3011 struct drm_i915_private *dev_priv = dev->dev_private; 3012 3013 I915_WRITE(HWSTAM, 0xffffffff); 3014 3015 GEN5_IRQ_RESET(DE); 3016 if (IS_GEN7(dev)) 3017 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3018 3019 gen5_gt_irq_reset(dev); 3020 3021 ibx_irq_reset(dev); 3022 } 3023 3024 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3025 { 3026 enum i915_pipe pipe; 3027 3028 I915_WRITE(PORT_HOTPLUG_EN, 0); 3029 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3030 3031 for_each_pipe(dev_priv, pipe) 3032 I915_WRITE(PIPESTAT(pipe), 0xffff); 3033 3034 GEN5_IRQ_RESET(VLV_); 3035 } 3036 3037 static void valleyview_irq_preinstall(struct drm_device *dev) 3038 { 3039 struct drm_i915_private *dev_priv = dev->dev_private; 3040 3041 /* VLV magic */ 3042 I915_WRITE(VLV_IMR, 0); 3043 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3044 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3045 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3046 3047 gen5_gt_irq_reset(dev); 3048 3049 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3050 3051 vlv_display_irq_reset(dev_priv); 3052 } 3053 3054 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3055 { 3056 GEN8_IRQ_RESET_NDX(GT, 0); 3057 GEN8_IRQ_RESET_NDX(GT, 1); 3058 GEN8_IRQ_RESET_NDX(GT, 2); 3059 GEN8_IRQ_RESET_NDX(GT, 3); 3060 } 3061 3062 static void gen8_irq_reset(struct drm_device *dev) 3063 { 3064 struct drm_i915_private 
*dev_priv = dev->dev_private; 3065 int pipe; 3066 3067 I915_WRITE(GEN8_MASTER_IRQ, 0); 3068 POSTING_READ(GEN8_MASTER_IRQ); 3069 3070 gen8_gt_irq_reset(dev_priv); 3071 3072 for_each_pipe(dev_priv, pipe) 3073 if (intel_display_power_is_enabled(dev_priv, 3074 POWER_DOMAIN_PIPE(pipe))) 3075 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3076 3077 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3078 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3079 GEN5_IRQ_RESET(GEN8_PCU_); 3080 3081 ibx_irq_reset(dev); 3082 } 3083 3084 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3085 unsigned int pipe_mask) 3086 { 3087 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3088 3089 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3090 if (pipe_mask & 1 << PIPE_A) 3091 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 3092 dev_priv->de_irq_mask[PIPE_A], 3093 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 3094 if (pipe_mask & 1 << PIPE_B) 3095 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 3096 dev_priv->de_irq_mask[PIPE_B], 3097 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3098 if (pipe_mask & 1 << PIPE_C) 3099 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 3100 dev_priv->de_irq_mask[PIPE_C], 3101 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3102 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3103 } 3104 3105 static void cherryview_irq_preinstall(struct drm_device *dev) 3106 { 3107 struct drm_i915_private *dev_priv = dev->dev_private; 3108 3109 I915_WRITE(GEN8_MASTER_IRQ, 0); 3110 POSTING_READ(GEN8_MASTER_IRQ); 3111 3112 gen8_gt_irq_reset(dev_priv); 3113 3114 GEN5_IRQ_RESET(GEN8_PCU_); 3115 3116 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3117 3118 vlv_display_irq_reset(dev_priv); 3119 } 3120 3121 static void ibx_hpd_irq_setup(struct drm_device *dev) 3122 { 3123 struct drm_i915_private *dev_priv = dev->dev_private; 3124 struct intel_encoder *intel_encoder; 3125 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3126 3127 if (HAS_PCH_IBX(dev)) { 3128 hotplug_irqs = SDE_HOTPLUG_MASK; 3129 for_each_intel_encoder(dev, intel_encoder) 3130 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3131 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3132 } else { 3133 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3134 for_each_intel_encoder(dev, intel_encoder) 3135 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3136 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3137 } 3138 3139 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3140 3141 /* 3142 * Enable digital hotplug on the PCH, and configure the DP short pulse 3143 * duration to 2ms (which is the minimum in the Display Port spec) 3144 * 3145 * This register is the same on all known PCH chips. 
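 * (Each PORTx pulse-duration field is cleared below and then set to the
 * 2ms encoding together with that port's hotplug enable bit.)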
3146 */ 3147 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3148 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3149 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3150 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3151 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3152 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3153 } 3154 3155 static void ibx_irq_postinstall(struct drm_device *dev) 3156 { 3157 struct drm_i915_private *dev_priv = dev->dev_private; 3158 u32 mask; 3159 3160 if (HAS_PCH_NOP(dev)) 3161 return; 3162 3163 if (HAS_PCH_IBX(dev)) 3164 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3165 else 3166 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3167 3168 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3169 I915_WRITE(SDEIMR, ~mask); 3170 } 3171 3172 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3173 { 3174 struct drm_i915_private *dev_priv = dev->dev_private; 3175 u32 pm_irqs, gt_irqs; 3176 3177 pm_irqs = gt_irqs = 0; 3178 3179 dev_priv->gt_irq_mask = ~0; 3180 if (HAS_L3_DPF(dev)) { 3181 /* L3 parity interrupt is always unmasked. */ 3182 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3183 gt_irqs |= GT_PARITY_ERROR(dev); 3184 } 3185 3186 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3187 if (IS_GEN5(dev)) { 3188 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3189 ILK_BSD_USER_INTERRUPT; 3190 } else { 3191 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3192 } 3193 3194 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3195 3196 if (INTEL_INFO(dev)->gen >= 6) { 3197 /* 3198 * RPS interrupts will get enabled/disabled on demand when RPS 3199 * itself is enabled/disabled. 3200 */ 3201 if (HAS_VEBOX(dev)) 3202 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3203 3204 dev_priv->pm_irq_mask = 0xffffffff; 3205 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3206 } 3207 } 3208 3209 static int ironlake_irq_postinstall(struct drm_device *dev) 3210 { 3211 struct drm_i915_private *dev_priv = dev->dev_private; 3212 u32 display_mask, extra_mask; 3213 3214 if (INTEL_INFO(dev)->gen >= 7) { 3215 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3216 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3217 DE_PLANEB_FLIP_DONE_IVB | 3218 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3219 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3220 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3221 } else { 3222 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3223 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3224 DE_AUX_CHANNEL_A | 3225 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3226 DE_POISON); 3227 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3228 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3229 } 3230 3231 dev_priv->irq_mask = ~display_mask; 3232 3233 I915_WRITE(HWSTAM, 0xeffe); 3234 3235 ibx_irq_pre_postinstall(dev); 3236 3237 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3238 3239 gen5_gt_irq_postinstall(dev); 3240 3241 ibx_irq_postinstall(dev); 3242 3243 if (IS_IRONLAKE_M(dev)) { 3244 /* Enable PCU event interrupts 3245 * 3246 * spinlocking not required here for correctness since interrupt 3247 * setup is guaranteed to run in single-threaded context. But we 3248 * need it to make the assert_spin_locked happy. 
*/ 3249 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3250 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3251 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3252 } 3253 3254 return 0; 3255 } 3256 3257 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3258 { 3259 u32 pipestat_mask; 3260 u32 iir_mask; 3261 enum i915_pipe pipe; 3262 3263 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3264 PIPE_FIFO_UNDERRUN_STATUS; 3265 3266 for_each_pipe(dev_priv, pipe) 3267 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3268 POSTING_READ(PIPESTAT(PIPE_A)); 3269 3270 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3271 PIPE_CRC_DONE_INTERRUPT_STATUS; 3272 3273 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3274 for_each_pipe(dev_priv, pipe) 3275 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3276 3277 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3278 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3279 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3280 if (IS_CHERRYVIEW(dev_priv)) 3281 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3282 dev_priv->irq_mask &= ~iir_mask; 3283 3284 I915_WRITE(VLV_IIR, iir_mask); 3285 I915_WRITE(VLV_IIR, iir_mask); 3286 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3287 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3288 POSTING_READ(VLV_IMR); 3289 } 3290 3291 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3292 { 3293 u32 pipestat_mask; 3294 u32 iir_mask; 3295 enum i915_pipe pipe; 3296 3297 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3298 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3299 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3300 if (IS_CHERRYVIEW(dev_priv)) 3301 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3302 3303 dev_priv->irq_mask |= iir_mask; 3304 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3305 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3306 I915_WRITE(VLV_IIR, iir_mask); 3307 I915_WRITE(VLV_IIR, iir_mask); 3308 POSTING_READ(VLV_IIR); 3309 3310 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3311 PIPE_CRC_DONE_INTERRUPT_STATUS; 3312 3313 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3314 for_each_pipe(dev_priv, pipe) 3315 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3316 3317 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3318 PIPE_FIFO_UNDERRUN_STATUS; 3319 3320 for_each_pipe(dev_priv, pipe) 3321 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3322 POSTING_READ(PIPESTAT(PIPE_A)); 3323 } 3324 3325 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3326 { 3327 assert_spin_locked(&dev_priv->irq_lock); 3328 3329 if (dev_priv->display_irqs_enabled) 3330 return; 3331 3332 dev_priv->display_irqs_enabled = true; 3333 3334 if (intel_irqs_enabled(dev_priv)) 3335 valleyview_display_irqs_install(dev_priv); 3336 } 3337 3338 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3339 { 3340 assert_spin_locked(&dev_priv->irq_lock); 3341 3342 if (!dev_priv->display_irqs_enabled) 3343 return; 3344 3345 dev_priv->display_irqs_enabled = false; 3346 3347 if (intel_irqs_enabled(dev_priv)) 3348 valleyview_display_irqs_uninstall(dev_priv); 3349 } 3350 3351 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3352 { 3353 dev_priv->irq_mask = ~0; 3354 3355 I915_WRITE(PORT_HOTPLUG_EN, 0); 3356 POSTING_READ(PORT_HOTPLUG_EN); 3357 3358 I915_WRITE(VLV_IIR, 0xffffffff); 3359 I915_WRITE(VLV_IIR, 0xffffffff); 3360 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3361 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3362 POSTING_READ(VLV_IMR); 3363 3364 /* Interrupt setup is already guaranteed 
to be single-threaded, this is 3365 * just to make the assert_spin_locked check happy. */ 3366 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3367 if (dev_priv->display_irqs_enabled) 3368 valleyview_display_irqs_install(dev_priv); 3369 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3370 } 3371 3372 static int valleyview_irq_postinstall(struct drm_device *dev) 3373 { 3374 struct drm_i915_private *dev_priv = dev->dev_private; 3375 3376 vlv_display_irq_postinstall(dev_priv); 3377 3378 gen5_gt_irq_postinstall(dev); 3379 3380 /* ack & enable invalid PTE error interrupts */ 3381 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3382 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3383 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3384 #endif 3385 3386 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3387 3388 return 0; 3389 } 3390 3391 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3392 { 3393 /* These are interrupts we'll toggle with the ring mask register */ 3394 uint32_t gt_interrupts[] = { 3395 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3396 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3397 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3398 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3399 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3400 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3401 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3402 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3403 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3404 0, 3405 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3406 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3407 }; 3408 3409 dev_priv->pm_irq_mask = 0xffffffff; 3410 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3411 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3412 /* 3413 * RPS interrupts will get enabled/disabled on demand when RPS itself 3414 * is enabled/disabled. 
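 * GT bank 2 carries the PM/RPS bits, so it is initialised below fully
 * masked (pm_irq_mask == 0xffffffff) with nothing enabled; banks 0 and 1
 * above and bank 3 below carry the per-ring user and context-switch
 * interrupts.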
3415 */ 3416 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3417 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3418 } 3419 3420 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3421 { 3422 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3423 uint32_t de_pipe_enables; 3424 int pipe; 3425 u32 aux_en = GEN8_AUX_CHANNEL_A; 3426 3427 if (IS_GEN9(dev_priv)) { 3428 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3429 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3430 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3431 GEN9_AUX_CHANNEL_D; 3432 } else 3433 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3434 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3435 3436 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3437 GEN8_PIPE_FIFO_UNDERRUN; 3438 3439 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3440 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3441 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3442 3443 for_each_pipe(dev_priv, pipe) 3444 if (intel_display_power_is_enabled(dev_priv, 3445 POWER_DOMAIN_PIPE(pipe))) 3446 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3447 dev_priv->de_irq_mask[pipe], 3448 de_pipe_enables); 3449 3450 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); 3451 } 3452 3453 static int gen8_irq_postinstall(struct drm_device *dev) 3454 { 3455 struct drm_i915_private *dev_priv = dev->dev_private; 3456 3457 ibx_irq_pre_postinstall(dev); 3458 3459 gen8_gt_irq_postinstall(dev_priv); 3460 gen8_de_irq_postinstall(dev_priv); 3461 3462 ibx_irq_postinstall(dev); 3463 3464 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3465 POSTING_READ(GEN8_MASTER_IRQ); 3466 3467 return 0; 3468 } 3469 3470 static int cherryview_irq_postinstall(struct drm_device *dev) 3471 { 3472 struct drm_i915_private *dev_priv = dev->dev_private; 3473 3474 vlv_display_irq_postinstall(dev_priv); 3475 3476 gen8_gt_irq_postinstall(dev_priv); 3477 3478 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3479 POSTING_READ(GEN8_MASTER_IRQ); 3480 3481 return 0; 3482 } 3483 3484 static void gen8_irq_uninstall(struct drm_device *dev) 3485 { 3486 struct drm_i915_private *dev_priv = dev->dev_private; 3487 3488 if (!dev_priv) 3489 return; 3490 3491 gen8_irq_reset(dev); 3492 } 3493 3494 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3495 { 3496 /* Interrupt setup is already guaranteed to be single-threaded, this is 3497 * just to make the assert_spin_locked check happy. 
*/ 3498 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3499 if (dev_priv->display_irqs_enabled) 3500 valleyview_display_irqs_uninstall(dev_priv); 3501 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3502 3503 vlv_display_irq_reset(dev_priv); 3504 3505 dev_priv->irq_mask = ~0; 3506 } 3507 3508 static void valleyview_irq_uninstall(struct drm_device *dev) 3509 { 3510 struct drm_i915_private *dev_priv = dev->dev_private; 3511 3512 if (!dev_priv) 3513 return; 3514 3515 I915_WRITE(VLV_MASTER_IER, 0); 3516 3517 gen5_gt_irq_reset(dev); 3518 3519 I915_WRITE(HWSTAM, 0xffffffff); 3520 3521 vlv_display_irq_uninstall(dev_priv); 3522 } 3523 3524 static void cherryview_irq_uninstall(struct drm_device *dev) 3525 { 3526 struct drm_i915_private *dev_priv = dev->dev_private; 3527 3528 if (!dev_priv) 3529 return; 3530 3531 I915_WRITE(GEN8_MASTER_IRQ, 0); 3532 POSTING_READ(GEN8_MASTER_IRQ); 3533 3534 gen8_gt_irq_reset(dev_priv); 3535 3536 GEN5_IRQ_RESET(GEN8_PCU_); 3537 3538 vlv_display_irq_uninstall(dev_priv); 3539 } 3540 3541 static void ironlake_irq_uninstall(struct drm_device *dev) 3542 { 3543 struct drm_i915_private *dev_priv = dev->dev_private; 3544 3545 if (!dev_priv) 3546 return; 3547 3548 ironlake_irq_reset(dev); 3549 } 3550 3551 static void i8xx_irq_preinstall(struct drm_device * dev) 3552 { 3553 struct drm_i915_private *dev_priv = dev->dev_private; 3554 int pipe; 3555 3556 for_each_pipe(dev_priv, pipe) 3557 I915_WRITE(PIPESTAT(pipe), 0); 3558 I915_WRITE16(IMR, 0xffff); 3559 I915_WRITE16(IER, 0x0); 3560 POSTING_READ16(IER); 3561 } 3562 3563 static int i8xx_irq_postinstall(struct drm_device *dev) 3564 { 3565 struct drm_i915_private *dev_priv = dev->dev_private; 3566 3567 I915_WRITE16(EMR, 3568 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3569 3570 /* Unmask the interrupts that we always want on. */ 3571 dev_priv->irq_mask = 3572 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3573 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3574 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3575 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3576 I915_WRITE16(IMR, dev_priv->irq_mask); 3577 3578 I915_WRITE16(IER, 3579 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3580 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3581 I915_USER_INTERRUPT); 3582 POSTING_READ16(IER); 3583 3584 /* Interrupt setup is already guaranteed to be single-threaded, this is 3585 * just to make the assert_spin_locked check happy. */ 3586 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3587 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3588 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3589 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3590 3591 return 0; 3592 } 3593 3594 /* 3595 * Returns true when a page flip has completed. 3596 */ 3597 static bool i8xx_handle_vblank(struct drm_device *dev, 3598 int plane, int pipe, u32 iir) 3599 { 3600 struct drm_i915_private *dev_priv = dev->dev_private; 3601 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3602 3603 if (!intel_pipe_handle_vblank(dev, pipe)) 3604 return false; 3605 3606 if ((iir & flip_pending) == 0) 3607 goto check_page_flip; 3608 3609 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3610 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3611 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3612 * the flip is completed (no longer pending). Since this doesn't raise 3613 * an interrupt per se, we watch for the change at vblank. 
3614 */ 3615 if (I915_READ16(ISR) & flip_pending) 3616 goto check_page_flip; 3617 3618 intel_prepare_page_flip(dev, plane); 3619 intel_finish_page_flip(dev, pipe); 3620 return true; 3621 3622 check_page_flip: 3623 intel_check_page_flip(dev, pipe); 3624 return false; 3625 }
3626 3627 static irqreturn_t i8xx_irq_handler(void *arg) 3628 { 3629 struct drm_device *dev = arg; 3630 struct drm_i915_private *dev_priv = dev->dev_private; 3631 u16 iir, new_iir; 3632 u32 pipe_stats[2]; 3633 int pipe; 3634 u16 flip_mask = 3635 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3636 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3637 3638 if (!intel_irqs_enabled(dev_priv)) 3639 return IRQ_NONE; 3640 3641 iir = I915_READ16(IIR); 3642 if (iir == 0) 3643 return IRQ_NONE; 3644 3645 while (iir & ~flip_mask) { 3646 /* Can't rely on pipestat interrupt bit in iir as it might 3647 * have been cleared after the pipestat interrupt was received. 3648 * It doesn't set the bit in iir again, but it still produces 3649 * interrupts (for non-MSI). 3650 */ 3651 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3652 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3653 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3654 3655 for_each_pipe(dev_priv, pipe) { 3656 int reg = PIPESTAT(pipe); 3657 pipe_stats[pipe] = I915_READ(reg); 3658 3659 /* 3660 * Clear the PIPE*STAT regs before the IIR 3661 */ 3662 if (pipe_stats[pipe] & 0x8000ffff) 3663 I915_WRITE(reg, pipe_stats[pipe]); 3664 } 3665 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3666 3667 I915_WRITE16(IIR, iir & ~flip_mask); 3668 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3669 3670 if (iir & I915_USER_INTERRUPT) 3671 notify_ring(dev, &dev_priv->ring[RCS]); 3672 3673 for_each_pipe(dev_priv, pipe) { 3674 int plane = pipe; 3675 if (HAS_FBC(dev)) 3676 plane = !plane; 3677 3678 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3679 i8xx_handle_vblank(dev, plane, pipe, iir)) 3680 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3681 3682 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3683 i9xx_pipe_crc_irq_handler(dev, pipe); 3684 3685 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3686 intel_cpu_fifo_underrun_irq_handler(dev_priv, 3687 pipe); 3688 } 3689 3690 iir = new_iir; 3691 } 3692 return IRQ_HANDLED; 3693 }
3694 3695 static void i8xx_irq_uninstall(struct drm_device * dev) 3696 { 3697 struct drm_i915_private *dev_priv = dev->dev_private; 3698 int pipe; 3699 3700 for_each_pipe(dev_priv, pipe) { 3701 /* Clear enable bits; then clear status bits */ 3702 I915_WRITE(PIPESTAT(pipe), 0); 3703 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3704 } 3705 I915_WRITE16(IMR, 0xffff); 3706 I915_WRITE16(IER, 0x0); 3707 I915_WRITE16(IIR, I915_READ16(IIR)); 3708 }
3709 3710 static void i915_irq_preinstall(struct drm_device * dev) 3711 { 3712 struct drm_i915_private *dev_priv = dev->dev_private; 3713 int pipe; 3714 3715 if (I915_HAS_HOTPLUG(dev)) { 3716 I915_WRITE(PORT_HOTPLUG_EN, 0); 3717 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3718 } 3719 3720 I915_WRITE16(HWSTAM, 0xeffe); 3721 for_each_pipe(dev_priv, pipe) 3722 I915_WRITE(PIPESTAT(pipe), 0); 3723 I915_WRITE(IMR, 0xffffffff); 3724 I915_WRITE(IER, 0x0); 3725 POSTING_READ(IER); 3726 }
3727 3728 static int i915_irq_postinstall(struct drm_device *dev) 3729 { 3730 struct drm_i915_private *dev_priv = dev->dev_private; 3731 u32 enable_mask; 3732 3733 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3734 3735 /* Unmask the interrupts that we always want on.
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

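/*
 * The FlipDone detection in i8xx_handle_vblank()/i915_handle_vblank() above
 * amounts to comparing the plane's FLIP_PENDING bit in IIR and ISR at vblank
 * time. A condensed sketch of that decision, for reference only (not
 * compiled; 'dev', 'plane', 'pipe' and 'iir' as in the functions above):
 */
#if 0
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if ((iir & flip_pending) != 0 &&		/* a flip was queued... */
	    (I915_READ(ISR) & flip_pending) == 0) {	/* ...and is no longer pending */
		intel_prepare_page_flip(dev, plane);
		intel_finish_page_flip(dev, pipe);
	} else {
		/* Nothing queued, or still pending: just look for a stuck flip. */
		intel_check_page_flip(dev, pipe);
	}
#endif
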
static irqreturn_t i915_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
3898 */ 3899 iir = new_iir; 3900 } while (iir & ~flip_mask); 3901 3902 } 3903 3904 static void i915_irq_uninstall(struct drm_device * dev) 3905 { 3906 struct drm_i915_private *dev_priv = dev->dev_private; 3907 int pipe; 3908 3909 if (I915_HAS_HOTPLUG(dev)) { 3910 I915_WRITE(PORT_HOTPLUG_EN, 0); 3911 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3912 } 3913 3914 I915_WRITE16(HWSTAM, 0xffff); 3915 for_each_pipe(dev_priv, pipe) { 3916 /* Clear enable bits; then clear status bits */ 3917 I915_WRITE(PIPESTAT(pipe), 0); 3918 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3919 } 3920 I915_WRITE(IMR, 0xffffffff); 3921 I915_WRITE(IER, 0x0); 3922 3923 I915_WRITE(IIR, I915_READ(IIR)); 3924 } 3925 3926 static void i965_irq_preinstall(struct drm_device * dev) 3927 { 3928 struct drm_i915_private *dev_priv = dev->dev_private; 3929 int pipe; 3930 3931 I915_WRITE(PORT_HOTPLUG_EN, 0); 3932 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3933 3934 I915_WRITE(HWSTAM, 0xeffe); 3935 for_each_pipe(dev_priv, pipe) 3936 I915_WRITE(PIPESTAT(pipe), 0); 3937 I915_WRITE(IMR, 0xffffffff); 3938 I915_WRITE(IER, 0x0); 3939 POSTING_READ(IER); 3940 } 3941 3942 static int i965_irq_postinstall(struct drm_device *dev) 3943 { 3944 struct drm_i915_private *dev_priv = dev->dev_private; 3945 u32 enable_mask; 3946 u32 error_mask; 3947 3948 /* Unmask the interrupts that we always want on. */ 3949 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3950 I915_DISPLAY_PORT_INTERRUPT | 3951 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3952 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3953 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3954 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3955 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3956 3957 enable_mask = ~dev_priv->irq_mask; 3958 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3959 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3960 enable_mask |= I915_USER_INTERRUPT; 3961 3962 if (IS_G4X(dev)) 3963 enable_mask |= I915_BSD_USER_INTERRUPT; 3964 3965 /* Interrupt setup is already guaranteed to be single-threaded, this is 3966 * just to make the assert_spin_locked check happy. */ 3967 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3968 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3969 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3970 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3971 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3972 3973 /* 3974 * Enable some error detection, note the instruction error mask 3975 * bit is reserved, so we leave it masked. 
3976 */ 3977 if (IS_G4X(dev)) { 3978 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3979 GM45_ERROR_MEM_PRIV | 3980 GM45_ERROR_CP_PRIV | 3981 I915_ERROR_MEMORY_REFRESH); 3982 } else { 3983 error_mask = ~(I915_ERROR_PAGE_TABLE | 3984 I915_ERROR_MEMORY_REFRESH); 3985 } 3986 I915_WRITE(EMR, error_mask); 3987 3988 I915_WRITE(IMR, dev_priv->irq_mask); 3989 I915_WRITE(IER, enable_mask); 3990 POSTING_READ(IER); 3991 3992 I915_WRITE(PORT_HOTPLUG_EN, 0); 3993 POSTING_READ(PORT_HOTPLUG_EN); 3994 3995 i915_enable_asle_pipestat(dev); 3996 3997 return 0; 3998 } 3999 4000 static void i915_hpd_irq_setup(struct drm_device *dev) 4001 { 4002 struct drm_i915_private *dev_priv = dev->dev_private; 4003 struct intel_encoder *intel_encoder; 4004 u32 hotplug_en; 4005 4006 assert_spin_locked(&dev_priv->irq_lock); 4007 4008 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 4009 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 4010 /* Note HDMI and DP share hotplug bits */ 4011 /* enable bits are the same for all generations */ 4012 for_each_intel_encoder(dev, intel_encoder) 4013 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 4014 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 4015 /* Programming the CRT detection parameters tends 4016 to generate a spurious hotplug event about three 4017 seconds later. So just do it once. 4018 */ 4019 if (IS_G4X(dev)) 4020 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4021 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 4022 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4023 4024 /* Ignore TV since it's buggy */ 4025 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 4026 } 4027 4028 static irqreturn_t i965_irq_handler(void *arg) 4029 { 4030 struct drm_device *dev = arg; 4031 struct drm_i915_private *dev_priv = dev->dev_private; 4032 u32 iir, new_iir; 4033 u32 pipe_stats[I915_MAX_PIPES]; 4034 int pipe; 4035 u32 flip_mask = 4036 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4037 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4038 4039 if (!intel_irqs_enabled(dev_priv)) 4040 return IRQ_NONE; 4041 4042 iir = I915_READ(IIR); 4043 4044 for (;;) { 4045 bool irq_received = (iir & ~flip_mask) != 0; 4046 bool blc_event = false; 4047 4048 /* Can't rely on pipestat interrupt bit in iir as it might 4049 * have been cleared after the pipestat interrupt was received. 4050 * It doesn't set the bit in iir again, but it still produces 4051 * interrupts (for non-MSI). 4052 */ 4053 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4054 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4055 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4056 4057 for_each_pipe(dev_priv, pipe) { 4058 int reg = PIPESTAT(pipe); 4059 pipe_stats[pipe] = I915_READ(reg); 4060 4061 /* 4062 * Clear the PIPE*STAT regs before the IIR 4063 */ 4064 if (pipe_stats[pipe] & 0x8000ffff) { 4065 I915_WRITE(reg, pipe_stats[pipe]); 4066 irq_received = true; 4067 } 4068 } 4069 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4070 4071 if (!irq_received) 4072 break; 4073 4074 /* Consume port. 
static irqreturn_t i965_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int pipe;
	irqreturn_t ret = IRQ_NONE;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled =
						DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

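/*
 * For orientation, a sketch of how these entry points are meant to be
 * sequenced during driver load, pieced together from the kernel-doc below
 * and on intel_irq_install()/intel_irq_uninstall(): intel_irq_init() only
 * sets up work items and vtables, intel_irq_install() enables the hardware
 * interrupt, and hotplug is turned on as a separate, later step.
 * Illustrative only, not compiled; error handling omitted.
 */
#if 0
	intel_irq_init(dev_priv);
	ret = intel_irq_install(dev_priv);	/* ret handling elided */
	intel_hpd_init(dev_priv);

	/* ... and on teardown ... */
	intel_irq_uninstall(dev_priv);
#endif
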
4309 */ 4310 void intel_hpd_init(struct drm_i915_private *dev_priv) 4311 { 4312 struct drm_device *dev = dev_priv->dev; 4313 struct drm_mode_config *mode_config = &dev->mode_config; 4314 struct drm_connector *connector; 4315 int i; 4316 4317 for (i = 1; i < HPD_NUM_PINS; i++) { 4318 dev_priv->hpd_stats[i].hpd_cnt = 0; 4319 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 4320 } 4321 list_for_each_entry(connector, &mode_config->connector_list, head) { 4322 struct intel_connector *intel_connector = to_intel_connector(connector); 4323 connector->polled = intel_connector->polled; 4324 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 4325 connector->polled = DRM_CONNECTOR_POLL_HPD; 4326 } 4327 4328 /* Interrupt setup is already guaranteed to be single-threaded, this is 4329 * just to make the assert_spin_locked checks happy. */ 4330 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4331 if (dev_priv->display.hpd_irq_setup) 4332 dev_priv->display.hpd_irq_setup(dev); 4333 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4334 } 4335 4336 /** 4337 * intel_irq_install - enables the hardware interrupt 4338 * @dev_priv: i915 device instance 4339 * 4340 * This function enables the hardware interrupt handling, but leaves the hotplug 4341 * handling still disabled. It is called after intel_irq_init(). 4342 * 4343 * In the driver load and resume code we need working interrupts in a few places 4344 * but don't want to deal with the hassle of concurrent probe and hotplug 4345 * workers. Hence the split into this two-stage approach. 4346 */ 4347 int intel_irq_install(struct drm_i915_private *dev_priv) 4348 { 4349 /* 4350 * We enable some interrupt sources in our postinstall hooks, so mark 4351 * interrupts as enabled _before_ actually enabling them to avoid 4352 * special cases in our ordering checks. 4353 */ 4354 dev_priv->pm.irqs_enabled = true; 4355 4356 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4357 } 4358 4359 /** 4360 * intel_irq_uninstall - finilizes all irq handling 4361 * @dev_priv: i915 device instance 4362 * 4363 * This stops interrupt and hotplug handling and unregisters and frees all 4364 * resources acquired in the init functions. 4365 */ 4366 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4367 { 4368 drm_irq_uninstall(dev_priv->dev); 4369 intel_hpd_cancel_work(dev_priv); 4370 dev_priv->pm.irqs_enabled = false; 4371 } 4372 4373 /** 4374 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4375 * @dev_priv: i915 device instance 4376 * 4377 * This function is used to disable interrupts at runtime, both in the runtime 4378 * pm and the system suspend/resume code. 4379 */ 4380 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4381 { 4382 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4383 dev_priv->pm.irqs_enabled = false; 4384 #if 0 4385 synchronize_irq(dev_priv->dev->irq); 4386 #endif 4387 } 4388 4389 /** 4390 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4391 * @dev_priv: i915 device instance 4392 * 4393 * This function is used to enable interrupts at runtime, both in the runtime 4394 * pm and the system suspend/resume code. 4395 */ 4396 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4397 { 4398 dev_priv->pm.irqs_enabled = true; 4399 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4400 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4401 } 4402