/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
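/*
 * The IMR update helpers above all follow the same two-mask convention:
 * @interrupt_mask selects which IMR bits to touch at all, and
 * @enabled_irq_mask says which of those bits end up unmasked. A minimal
 * usage sketch follows (illustrative only, not a driver code path: the
 * function name and the choice of interrupt bit are invented for the
 * example; the locking mirrors what the callers in this file do):
 */
#if 0
static void example_toggle_bsd_irq(struct drm_i915_private *dev_priv,
				   bool enable)
{
	/* dev_priv->irq_lock must be held across ilk_update_gt_irq() */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (enable)
		/* touch only GT_BSD_USER_INTERRUPT, and unmask it */
		gen5_enable_gt_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	else
		/* touch the same bit, but leave it masked */
		gen5_disable_gt_irq(dev_priv, GT_BSD_USER_INTERRUPT);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
#endif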
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB are known to hard hang, and VLV and CHV may hard hang,
	 * on a looping batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->rps.interrupts_enabled = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	cancel_work_sync(&dev_priv->rps.work);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Wait for pending IRQ handlers to complete (on other CPUs) */
#if 0
	synchronize_irq(dev->irq);
#endif
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
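/*
 * PIPESTAT packs an interrupt's enable bit in the high half of the register,
 * exactly 16 bits above the matching status bit in the low half, which is
 * why the callers below can usually derive the enable mask as
 * status_mask << 16. A hedged, illustrative sketch (the function name and
 * the choice of status bit are assumptions for the example, not driver
 * code; the locking mirrors the callers in this file):
 */
#if 0
static void example_enable_vblank_pipestat(struct drm_i915_private *dev_priv)
{
	/* low 16 bits: status (write-1-to-clear); high 16 bits: enables */
	u32 status_mask = PIPE_START_VBLANK_INTERRUPT_STATUS;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* i915_enable_pipestat() derives the enable bit (status << 16) */
	i915_enable_pipestat(dev_priv, PIPE_A, status_mask);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
#endif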
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .         \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         |  increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}
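/*
 * Worked example for the vblank counter cook-up above (all numbers invented
 * for illustration): with htotal = 100 and hsync_start = 92, a vblank_start
 * of 80 lines becomes 80 * 100 - (100 - 92) = 7992 pixels. If the stable
 * frame counter reads give high1 = 0x1 and low = 0x2, and the pixel counter
 * reads 7992 or more, the hardware is already past the start of vblank of
 * the next frame, so the cooked counter is ((0x1 << 8) | 0x2) + 1 = 0x103;
 * with the pixel counter below 7992 it stays at 0x102.
 */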
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (IS_HASWELL(dev) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To avoid the
		 * reported position jumping backwards when the pixel counter
		 * is beyond the length of the shorter field, just clamp the
		 * position to the length of the shorter field. This matches
		 * how the scanline counter based position works since the
		 * scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
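/*
 * Worked example for the position normalization above (values invented for
 * illustration): say vbl_start = 100 and vbl_end = vtotal = 105. A scanline
 * position of 103 is inside vblank, and 103 - vbl_end = -2, i.e. two lines
 * before the start of the next frame's active area. A position of 50 is in
 * the active area and becomes 50 + (vtotal - vbl_end) = 50, i.e. 50 lines
 * past the start of active, exactly as the comment in the function states.
 */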
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int position;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	position = __intel_get_crtc_scanline(crtc);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);
}

static void notify_ring(struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		if (ring->irq_refcount)
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;

	if (client_boost) {
		new_delay = dev_priv->rps.max_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
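/*
 * The up/down ladder in gen6_pm_rps_work() doubles last_adj on consecutive
 * events in the same direction, snaps to the efficient frequency on the way
 * down, and clamps to the softlimits. A self-contained sketch of just that
 * policy (the function and its parameters are invented for illustration;
 * this is not a driver code path):
 */
#if 0
static int example_next_rps_freq(int cur, int last_adj, bool up,
				 int min, int max)
{
	int adj;

	if (up)
		/* keep accelerating while we keep seeing up events */
		adj = last_adj > 0 ? last_adj * 2 : 1;
	else
		adj = last_adj < 0 ? last_adj * 2 : -1;

	/* never step outside the soft limits */
	return clamp_t(int, cur + adj, min, max);
}
#endif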
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(&dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[RCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS]);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);

			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
				notify_ring(&dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      tmp & dev_priv->pm_rps_events);
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return IRQ_HANDLED;
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & BXT_PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}
static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/* Get a bit mask of pins that have triggered, and which ones may be long. */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	*pin_mask = 0;
	*long_mask = 0;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}
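/*
 * Worked example for intel_get_hpd_pins() (the trigger values are invented
 * for illustration): with hpd_status_g4x as the table and a hotplug_trigger
 * in which only PORTB_HOTPLUG_INT_STATUS is set, pin_mask comes back as
 * BIT(HPD_PORT_B); long_mask additionally gets BIT(HPD_PORT_B) if the
 * dig_hotplug_reg value passes i9xx_port_hotplug_long_detect() for PORT_B,
 * i.e. if it has PORTB_HOTPLUG_INT_LONG_PULSE set.
 */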
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* FIFO underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
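/*
 * A condensed view of the per-pipe decode above (illustrative fragment, not
 * additional driver code): the IIR bit only says "something happened on pipe
 * A", PIPESTAT says what, and writing the status bits back acks them while
 * the enable bits, re-written with their current value, stay in effect.
 */
#if 0
	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
		u32 stat = I915_READ(PIPESTAT(PIPE_A));

		/* status bits are write-1-to-clear; enables persist */
		I915_WRITE(PIPESTAT(PIPE_A), stat);
	}
#endif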
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
	u32 pin_mask, long_mask;

	if (!hotplug_status)
		return;

	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
	/*
	 * Make sure hotplug status is cleared before we clear IIR, or else we
	 * may miss hotplug events.
	 */
	POSTING_READ(PORT_HOTPLUG_STAT);

	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_g4x,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);

		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	} else {
		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   hotplug_trigger, hpd_status_i915,
				   i9xx_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}
}

static irqreturn_t valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return IRQ_HANDLED;
}

static irqreturn_t cherryview_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return IRQ_HANDLED;
}
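/*
 * All of the top-level handlers here follow the same "find, clear, then
 * process" discipline: snapshot IIR, write the snapshot back so a new edge
 * re-latches instead of being lost, and only then act on the snapshot. A
 * minimal illustration (process_events() is an invented stand-in, not a
 * real function in this driver):
 */
#if 0
	u32 iir = I915_READ(VLV_IIR);	/* find the pending sources */
	I915_WRITE(VLV_IIR, iir);	/* clear: later events re-set IIR */
	process_events(iir);		/* process our private snapshot */
#endif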
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		u32 dig_hotplug_reg, pin_mask, long_mask;

		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
				   dig_hotplug_reg, hpd_ibx,
				   pch_port_hotplug_long_detect);
		intel_hpd_irq_handler(dev, pin_mask, long_mask);
	}

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum i915_pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
1800 intel_get_hpd_pins(&pin_mask, &long_mask, 1801 hotplug_trigger, 1802 dig_hotplug_reg, hpd_cpt, 1803 pch_port_hotplug_long_detect); 1804 1805 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1806 } 1807 1808 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1809 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1810 SDE_AUDIO_POWER_SHIFT_CPT); 1811 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1812 port_name(port)); 1813 } 1814 1815 if (pch_iir & SDE_AUX_MASK_CPT) 1816 dp_aux_irq_handler(dev); 1817 1818 if (pch_iir & SDE_GMBUS_CPT) 1819 gmbus_irq_handler(dev); 1820 1821 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 1822 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 1823 1824 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 1825 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 1826 1827 if (pch_iir & SDE_FDI_MASK_CPT) 1828 for_each_pipe(dev_priv, pipe) 1829 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1830 pipe_name(pipe), 1831 I915_READ(FDI_RX_IIR(pipe))); 1832 1833 if (pch_iir & SDE_ERROR_CPT) 1834 cpt_serr_int_handler(dev); 1835 } 1836 1837 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 1838 { 1839 struct drm_i915_private *dev_priv = dev->dev_private; 1840 enum i915_pipe pipe; 1841 1842 if (de_iir & DE_AUX_CHANNEL_A) 1843 dp_aux_irq_handler(dev); 1844 1845 if (de_iir & DE_GSE) 1846 intel_opregion_asle_intr(dev); 1847 1848 if (de_iir & DE_POISON) 1849 DRM_ERROR("Poison interrupt\n"); 1850 1851 for_each_pipe(dev_priv, pipe) { 1852 if (de_iir & DE_PIPE_VBLANK(pipe) && 1853 intel_pipe_handle_vblank(dev, pipe)) 1854 intel_check_page_flip(dev, pipe); 1855 1856 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 1857 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1858 1859 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 1860 i9xx_pipe_crc_irq_handler(dev, pipe); 1861 1862 /* plane/pipes map 1:1 on ilk+ */ 1863 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 1864 intel_prepare_page_flip(dev, pipe); 1865 intel_finish_page_flip_plane(dev, pipe); 1866 } 1867 } 1868 1869 /* check event from PCH */ 1870 if (de_iir & DE_PCH_EVENT) { 1871 u32 pch_iir = I915_READ(SDEIIR); 1872 1873 if (HAS_PCH_CPT(dev)) 1874 cpt_irq_handler(dev, pch_iir); 1875 else 1876 ibx_irq_handler(dev, pch_iir); 1877 1878 /* should clear PCH hotplug event before clear CPU irq */ 1879 I915_WRITE(SDEIIR, pch_iir); 1880 } 1881 1882 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 1883 ironlake_rps_change_irq_handler(dev); 1884 } 1885 1886 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 1887 { 1888 struct drm_i915_private *dev_priv = dev->dev_private; 1889 enum i915_pipe pipe; 1890 1891 if (de_iir & DE_ERR_INT_IVB) 1892 ivb_err_int_handler(dev); 1893 1894 if (de_iir & DE_AUX_CHANNEL_A_IVB) 1895 dp_aux_irq_handler(dev); 1896 1897 if (de_iir & DE_GSE_IVB) 1898 intel_opregion_asle_intr(dev); 1899 1900 for_each_pipe(dev_priv, pipe) { 1901 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 1902 intel_pipe_handle_vblank(dev, pipe)) 1903 intel_check_page_flip(dev, pipe); 1904 1905 /* plane/pipes map 1:1 on ilk+ */ 1906 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 1907 intel_prepare_page_flip(dev, pipe); 1908 intel_finish_page_flip_plane(dev, pipe); 1909 } 1910 } 1911 1912 /* check event from PCH */ 1913 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 1914 u32 pch_iir = I915_READ(SDEIIR); 1915 1916 cpt_irq_handler(dev, pch_iir); 1917 1918 /* clear PCH hotplug event before clear CPU irq */ 1919 I915_WRITE(SDEIIR, pch_iir); 1920 } 1921 } 1922 1923 /* 1924 * To handle irqs with the minimum potential races with fresh interrupts, we: 
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return IRQ_HANDLED;
}

static void bxt_hpd_handler(struct drm_device *dev, uint32_t iir_status)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hp_control, hp_trigger;
	u32 pin_mask, long_mask;

	/* Get the status */
	hp_trigger = iir_status & BXT_DE_PORT_HOTPLUG_MASK;
	hp_control = I915_READ(BXT_HOTPLUG_CTL);

	/* Hotplug not enabled? */
	if (!(hp_control & BXT_HOTPLUG_CTL_MASK)) {
		DRM_ERROR("Interrupt when HPD disabled\n");
		return;
	}

	/* Clear sticky bits in hpd status */
	I915_WRITE(BXT_HOTPLUG_CTL, hp_control);

	intel_get_hpd_pins(&pin_mask, &long_mask, hp_trigger, hp_control,
			   hpd_bxt, bxt_port_hotplug_long_detect);
	intel_hpd_irq_handler(dev, pin_mask, long_mask);
}

static irqreturn_t gen8_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	uint32_t tmp = 0;
	enum i915_pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	if (IS_GEN9(dev))
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);

	/* Find, clear, then process each source of interrupt */

	gen8_gt_irq_handler(dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			bool found = false;

			I915_WRITE(GEN8_DE_PORT_IIR, tmp);

			if (tmp & aux_mask) {
				dp_aux_irq_handler(dev);
				found = true;
			}

			if (IS_BROXTON(dev) && tmp & BXT_DE_PORT_HOTPLUG_MASK) {
				bxt_hpd_handler(dev, tmp);
				found = true;
			}

			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
				gmbus_irq_handler(dev);
				found = true;
			}

			if (!found)
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  fault_errors);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
	    master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ_FW(GEN8_MASTER_IRQ);

	return IRQ_HANDLED;
}

static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_reset_and_wakeup - do process context error handling work
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_reset_and_wakeup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_gpu_error *error = &dev_priv->gpu_error;
#if 0
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
#endif
	int ret;

#if 0
	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
#endif

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
#if 0
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);
#endif

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
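		 * Without the reference, the reset sequence below could
		 * otherwise end up touching hardware registers while the
		 * device is runtime suspended.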
		 */
		intel_runtime_pm_get(dev_priv);

		intel_prepare_reset(dev);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_finish_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

#if 0
			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
#endif
		} else {
			atomic_or(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}

static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

#if 0
	i915_get_extra_instdone(dev, instdone);
#endif
	/* XXX instdone[] stays uninitialized while the call above is compiled out */

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle a gpu error
 * @dev: drm device
 * @wedged: true if the GPU is hung and a reset should be attempted
 * @fmt: printf-style format string describing the error
 *
 * Do some basic checking of register state at error time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
#if 0
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
#endif
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
			  &dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset function
		 * i915_reset_and_wakeup doesn't deadlock trying to grab
		 * various locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	i915_reset_and_wakeup(dev);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_enable_display_irq(dev_priv, bit);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ironlake_disable_display_irq(dev_priv, bit);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring->last_submitted_seqno));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
		  ring->id, ipehr, offset);

	return NULL;
}

static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at the batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}
	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}

static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}

static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}

/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of seqno progress per ring,
 * and if there is no progress the hangcheck score for that ring is
 * increased. Further, acthd is inspected to see if the ring is stuck. If
 * it is, we kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     gpu_error.hangcheck_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is when
				 * the ring is in a legitimate wait for
				 * another ring. In that case the waiting ring
				 * is a victim and we want to be sure we catch
				 * the right culprit.
				 * Then every time we kick the ring, we add a
				 * small increment to the score so that we can
				 * catch a batch that is being repeatedly
				 * kicked and so is responsible for stalling
				 * the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset the timer in case the chip hangs without another
		 * request being added */
		i915_queue_hangcheck(dev);
}

void i915_queue_hangcheck(struct drm_device *dev)
{
	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
}

static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
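 * (ironlake_irq_handler() is the handler in question: it temporarily
 * writes SDEIER to 0 and restores it around its SDEIIR processing.)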
2871 */ 2872 static void ibx_irq_pre_postinstall(struct drm_device *dev) 2873 { 2874 struct drm_i915_private *dev_priv = dev->dev_private; 2875 2876 if (HAS_PCH_NOP(dev)) 2877 return; 2878 2879 WARN_ON(I915_READ(SDEIER) != 0); 2880 I915_WRITE(SDEIER, 0xffffffff); 2881 POSTING_READ(SDEIER); 2882 } 2883 2884 static void gen5_gt_irq_reset(struct drm_device *dev) 2885 { 2886 struct drm_i915_private *dev_priv = dev->dev_private; 2887 2888 GEN5_IRQ_RESET(GT); 2889 if (INTEL_INFO(dev)->gen >= 6) 2890 GEN5_IRQ_RESET(GEN6_PM); 2891 } 2892 2893 /* drm_dma.h hooks 2894 */ 2895 static void ironlake_irq_reset(struct drm_device *dev) 2896 { 2897 struct drm_i915_private *dev_priv = dev->dev_private; 2898 2899 I915_WRITE(HWSTAM, 0xffffffff); 2900 2901 GEN5_IRQ_RESET(DE); 2902 if (IS_GEN7(dev)) 2903 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 2904 2905 gen5_gt_irq_reset(dev); 2906 2907 ibx_irq_reset(dev); 2908 } 2909 2910 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 2911 { 2912 enum i915_pipe pipe; 2913 2914 I915_WRITE(PORT_HOTPLUG_EN, 0); 2915 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2916 2917 for_each_pipe(dev_priv, pipe) 2918 I915_WRITE(PIPESTAT(pipe), 0xffff); 2919 2920 GEN5_IRQ_RESET(VLV_); 2921 } 2922 2923 static void valleyview_irq_preinstall(struct drm_device *dev) 2924 { 2925 struct drm_i915_private *dev_priv = dev->dev_private; 2926 2927 /* VLV magic */ 2928 I915_WRITE(VLV_IMR, 0); 2929 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2930 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2931 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2932 2933 gen5_gt_irq_reset(dev); 2934 2935 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2936 2937 vlv_display_irq_reset(dev_priv); 2938 } 2939 2940 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 2941 { 2942 GEN8_IRQ_RESET_NDX(GT, 0); 2943 GEN8_IRQ_RESET_NDX(GT, 1); 2944 GEN8_IRQ_RESET_NDX(GT, 2); 2945 GEN8_IRQ_RESET_NDX(GT, 3); 2946 } 2947 2948 static void gen8_irq_reset(struct drm_device *dev) 2949 { 2950 struct drm_i915_private *dev_priv = dev->dev_private; 2951 int pipe; 2952 2953 I915_WRITE(GEN8_MASTER_IRQ, 0); 2954 POSTING_READ(GEN8_MASTER_IRQ); 2955 2956 gen8_gt_irq_reset(dev_priv); 2957 2958 for_each_pipe(dev_priv, pipe) 2959 if (intel_display_power_is_enabled(dev_priv, 2960 POWER_DOMAIN_PIPE(pipe))) 2961 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 2962 2963 GEN5_IRQ_RESET(GEN8_DE_PORT_); 2964 GEN5_IRQ_RESET(GEN8_DE_MISC_); 2965 GEN5_IRQ_RESET(GEN8_PCU_); 2966 2967 if (HAS_PCH_SPLIT(dev)) 2968 ibx_irq_reset(dev); 2969 } 2970 2971 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 2972 unsigned int pipe_mask) 2973 { 2974 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 2975 2976 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2977 if (pipe_mask & 1 << PIPE_A) 2978 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, 2979 dev_priv->de_irq_mask[PIPE_A], 2980 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); 2981 if (pipe_mask & 1 << PIPE_B) 2982 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, 2983 dev_priv->de_irq_mask[PIPE_B], 2984 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 2985 if (pipe_mask & 1 << PIPE_C) 2986 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, 2987 dev_priv->de_irq_mask[PIPE_C], 2988 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 2989 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2990 } 2991 2992 static void cherryview_irq_preinstall(struct drm_device *dev) 2993 { 2994 struct drm_i915_private *dev_priv = dev->dev_private; 2995 2996 I915_WRITE(GEN8_MASTER_IRQ, 0); 2997 POSTING_READ(GEN8_MASTER_IRQ); 2998 2999 
	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}

static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else if (HAS_PCH_SPT(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_spt[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec)
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);

	/* enable SPT PORTE hot plug */
	if (HAS_PCH_SPT(dev)) {
		hotplug = I915_READ(PCH_PORT_HOTPLUG2);
		hotplug |= PORTE_HOTPLUG_ENABLE;
		I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
	}
}

static void bxt_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_port = 0;
	u32 hotplug_ctrl;

	/* Now, enable HPD */
	for_each_intel_encoder(dev, intel_encoder) {
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state
				== HPD_ENABLED)
			hotplug_port |= hpd_bxt[intel_encoder->hpd_pin];
	}

	/* Mask all HPD control bits */
	hotplug_ctrl = I915_READ(BXT_HOTPLUG_CTL) & ~BXT_HOTPLUG_CTL_MASK;

	/* Enable requested port in hotplug control */
	/* TODO: implement (short) HPD support on port A */
	WARN_ON_ONCE(hotplug_port & BXT_DE_PORT_HP_DDIA);
	if (hotplug_port & BXT_DE_PORT_HP_DDIB)
		hotplug_ctrl |= BXT_DDIB_HPD_ENABLE;
	if (hotplug_port & BXT_DE_PORT_HP_DDIC)
		hotplug_ctrl |= BXT_DDIC_HPD_ENABLE;
	I915_WRITE(BXT_HOTPLUG_CTL, hotplug_ctrl);

	/* Unmask DDI hotplug in IMR */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IMR) & ~hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IMR, hotplug_ctrl);

	/* Enable DDI hotplug in IER */
	hotplug_ctrl = I915_READ(GEN8_DE_PORT_IER) | hotplug_port;
	I915_WRITE(GEN8_DE_PORT_IER, hotplug_ctrl);
	POSTING_READ(GEN8_DE_PORT_IER);
}

static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		/*
		 * RPS interrupts will get enabled/disabled on demand when RPS
		 * itself is enabled/disabled.
		 */
		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}

static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	return 0;
}

static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum i915_pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum i915_pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}

static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	/*
	 * RPS interrupts will get enabled/disabled on demand when RPS itself
	 * is enabled/disabled.
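	 * GT index 2 therefore starts fully masked (pm_irq_mask == ~0),
	 * with nothing enabled in its IER.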
	 */
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}

static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 de_port_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		de_port_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

		if (IS_BROXTON(dev_priv))
			de_port_en |= BXT_DE_PORT_GMBUS;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
					   GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_en, de_port_en);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	if (HAS_PCH_SPLIT(dev))
		ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = ~0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}

static void i8xx_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
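	 * If ISR still shows the pending bit, the flip has not completed
	 * yet, so fall through and poke intel_check_page_flip() instead.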
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on.
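	 * Everything else stays masked in IMR until it is enabled on demand.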
	 */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i915_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
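		 * So read and ack PIPESTAT under the lock first, and only
		 * then clear the IIR bits.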
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);

	return IRQ_HANDLED;
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

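	/*
	 * EMR is the error-side analogue of IMR: a bit set in EMR masks that
	 * error condition from being latched into EIR and from raising the
	 * master error interrupt (at least, that's the usual reading of the
	 * EIR/EMR pairing on these parts).
	 */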
	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hotplug.stats[intel_encoder->hpd_pin].state == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later.  So just do it
	 * once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}

static irqreturn_t i965_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(&dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
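
/*
 * Everything below is the driver-level setup glue.  The per-platform hooks
 * above are not called directly: in the legacy DRM scheme, drm_irq_install()
 * invokes the irq_preinstall hook, wires up irq_handler, then calls
 * irq_postinstall, and drm_irq_uninstall() runs irq_uninstall.  Each
 * generation therefore only has to fill in the vtable, which is what
 * intel_irq_init() below does.
 */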
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED |
					  GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (HAS_PCH_SPLIT(dev))
			dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves hotplug
 * handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
#if 0
	synchronize_irq(dev_priv->dev->irq);
#endif
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
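
/*
 * For reference, a minimal sketch (not compiled) of the load-time ordering
 * implied by the kerneldoc above.  example_driver_load() is an illustrative
 * name only, and hotplug is assumed to be enabled separately via
 * intel_hpd_init() once probing can cope with it.
 */
#if 0
static int example_driver_load(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	intel_irq_init(dev_priv);	/* vtables, work items, timers */

	ret = intel_irq_install(dev_priv); /* preinstall, handler, postinstall */
	if (ret)
		return ret;

	/* ... probe/modeset code that needs working interrupts ... */

	intel_hpd_init(dev_priv);	/* only now enable hotplug handling */
	return 0;
}
#endif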