/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
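/*
 * Illustrative sketch (compiled out, not part of the driver): the IMR
 * helpers above follow the convention that a set bit in
 * dev_priv->gt_irq_mask *masks* the interrupt, so "enable" clears bits
 * and "disable" sets them. Callers must hold dev_priv->irq_lock, which
 * ilk_update_gt_irq() asserts. The function below is hypothetical and
 * only shows the calling pattern.
 */
#if 0
static void example_toggle_render_irq(struct drm_i915_private *dev_priv,
				      bool enable)
{
	/* Take irq_lock; the update helpers assert it is held. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (enable)
		gen5_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
	else
		gen5_disable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
#endif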
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* IIR can queue up two events, hence the double clear (see the
	 * "Be paranoid" comment at the top of this file). */
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB,IVB can hard hang, and VLV,CHV may hard hang, on a looping
	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->rps.interrupts_enabled = false;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	cancel_work_sync(&dev_priv->rps.work);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
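/*
 * Illustrative sketch (compiled out, not part of the driver): pipestat
 * callers pass only the status bits; i915_enable_pipestat() derives the
 * enable bits, which on most platforms sit exactly 16 above the status
 * bits (status_mask << 16), with the VLV special cases handled above.
 * A hypothetical caller requesting start-of-vblank events on pipe A
 * would look like this; dev_priv->irq_lock must be held around the call.
 */
#if 0
static void example_request_vblank_events(struct drm_i915_private *dev_priv)
{
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Status bit only; the matching enable bit is derived internally. */
	i915_enable_pipestat(dev_priv, PIPE_A,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
#endif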
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config->base.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
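/*
 * Worked example for the cook-up above, with made-up mode numbers:
 * htotal = 1000, hsync_start = 900, vblank_start = 800 lines. Then
 * vbl_start = 800 * 1000 - (1000 - 900) = 799900 pixels, so the
 * (pixel >= vbl_start) term bumps the returned count starting from the
 * hsync that precedes the first vblank line, matching where the
 * hardware frame counter increments on ctg+ (see the timing diagram).
 */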
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) DRM_READ32(dev_priv__->mmio_map, reg__)

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
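/*
 * Worked example for the vblank-relative position above, using a
 * made-up 1024x768-like mode with vbl_start = 768 and
 * vbl_end = vtotal = 806: a raw position of 800 (inside vblank)
 * becomes 800 - 806 = -6, counting up to 0 at vbl_end, while a raw
 * position of 100 (active scanout) stays 100, counting up from
 * vbl_end. Callers can thus use the sign of *vpos to tell vblank
 * from active scanout.
 */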
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int position;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	position = __intel_get_crtc_scanline(crtc);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true, fall back to old-school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		dev_priv->hpd_event_bits |= old_bits;
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */

#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
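/*
 * Note on the storm recovery flow above: once the irq handler marks a
 * pin HPD_MARK_DISABLED, the work function downgrades the connector
 * from interrupt-driven detection to polling and arms
 * hotplug_reenable_work, so interrupt-driven hotplug is retried after
 * I915_REENABLE_HOTPLUG_DELAY (two minutes, in milliseconds).
 */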
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}
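/*
 * Worked example for vlv_c0_residency() with made-up counts: if 10 ms
 * of wall time elapsed (after scaling the CZ timestamp and the C0
 * counters to milliseconds as above) and the render well logged the
 * equivalent of 6 ms of C0 time while media logged 3 ms, the reported
 * residency is max(6, 3) * 100 / 10 = 60 percent.
 */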
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busyness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than the down
	 * threshold for continuous EI intervals. So calculate down EI
	 * counters once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means C0 residency is less than the down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq.
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
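/*
 * Note for the GEN8 handler below: each GEN8_GT_IIR(n) register packs
 * the bits of two engines, selected with the GEN8_*_IRQ_SHIFT constants
 * (RCS and BCS share IIR(0), VCS1 and VCS2 share IIR(1)), which is why
 * every engine's bits are extracted by shifting the same 32-bit read.
 */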
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
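/*
 * The two thresholds above work together: the first interrupt on a pin
 * (re)opens a HPD_STORM_DETECT_PERIOD (1000 ms) window and zeroes the
 * per-pin count; each further interrupt inside the window increments
 * it, and once the count has exceeded HPD_STORM_THRESHOLD (5) the pin
 * is declared stormy and intel_hpd_irq_handler() below switches it to
 * polling.
 */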
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		schedule_work(&dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
1855 */ 1856 POSTING_READ(PORT_HOTPLUG_STAT); 1857 1858 if (IS_G4X(dev)) { 1859 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1860 1861 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); 1862 } else { 1863 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1864 1865 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); 1866 } 1867 1868 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && 1869 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1870 dp_aux_irq_handler(dev); 1871 } 1872 } 1873 1874 static irqreturn_t valleyview_irq_handler(void *arg) 1875 { 1876 struct drm_device *dev = arg; 1877 struct drm_i915_private *dev_priv = dev->dev_private; 1878 u32 iir, gt_iir, pm_iir; 1879 irqreturn_t ret = IRQ_NONE; 1880 if (!intel_irqs_enabled(dev_priv)) 1881 return IRQ_NONE; 1882 1883 while (true) { 1884 /* Find, clear, then process each source of interrupt */ 1885 1886 gt_iir = I915_READ(GTIIR); 1887 if (gt_iir) 1888 I915_WRITE(GTIIR, gt_iir); 1889 1890 pm_iir = I915_READ(GEN6_PMIIR); 1891 if (pm_iir) 1892 I915_WRITE(GEN6_PMIIR, pm_iir); 1893 1894 iir = I915_READ(VLV_IIR); 1895 if (iir) { 1896 /* Consume port before clearing IIR or we'll miss events */ 1897 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1898 i9xx_hpd_irq_handler(dev); 1899 I915_WRITE(VLV_IIR, iir); 1900 } 1901 1902 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1903 goto out; 1904 ret = IRQ_HANDLED; 1905 if (gt_iir) 1906 snb_gt_irq_handler(dev, dev_priv, gt_iir); 1907 if (pm_iir) 1908 gen6_rps_irq_handler(dev_priv, pm_iir); 1909 /* Call regardless, as some status bits might not be 1910 * signalled in iir */ 1911 valleyview_pipestat_irq_handler(dev, iir); 1912 } 1913 1914 out: 1915 return ret; 1916 } 1917 1918 static irqreturn_t cherryview_irq_handler(void *arg) 1919 { 1920 struct drm_device *dev = arg; 1921 struct drm_i915_private *dev_priv = dev->dev_private; 1922 u32 master_ctl, iir; 1923 irqreturn_t ret = IRQ_NONE; 1924 if (!intel_irqs_enabled(dev_priv)) 1925 return IRQ_NONE; 1926 1927 for (;;) { 1928 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1929 iir = I915_READ(VLV_IIR); 1930 1931 if (master_ctl == 0 && iir == 0) 1932 break; 1933 ret = IRQ_HANDLED; 1934 1935 I915_WRITE(GEN8_MASTER_IRQ, 0); 1936 1937 /* Find, clear, then process each source of interrupt */ 1938 1939 if (iir) { 1940 /* Consume port before clearing IIR or we'll miss events */ 1941 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1942 i9xx_hpd_irq_handler(dev); 1943 I915_WRITE(VLV_IIR, iir); 1944 } 1945 1946 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1947 1948 /* Call regardless, as some status bits might not be 1949 * signalled in iir */ 1950 valleyview_pipestat_irq_handler(dev, iir); 1951 1952 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1953 POSTING_READ(GEN8_MASTER_IRQ); 1954 } 1955 return ret; 1956 } 1957 1958 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1959 { 1960 struct drm_i915_private *dev_priv = dev->dev_private; 1961 int pipe; 1962 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1963 u32 dig_hotplug_reg; 1964 1965 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1966 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1967 1968 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); 1969 1970 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1971 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1972 SDE_AUDIO_POWER_SHIFT); 1973 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1974 port_name(port)); 1975 } 1976 1977 if (pch_iir & SDE_AUX_MASK) 1978 dp_aux_irq_handler(dev); 1979 1980 if (pch_iir & SDE_GMBUS) 1981 gmbus_irq_handler(dev); 1982 1983 if (pch_iir & 
SDE_AUDIO_HDCP_MASK) 1984 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1985 1986 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1987 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1988 1989 if (pch_iir & SDE_POISON) 1990 DRM_ERROR("PCH poison interrupt\n"); 1991 1992 if (pch_iir & SDE_FDI_MASK) 1993 for_each_pipe(dev_priv, pipe) 1994 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1995 pipe_name(pipe), 1996 I915_READ(FDI_RX_IIR(pipe))); 1997 1998 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1999 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2000 2001 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2002 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2003 2004 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2005 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2006 2007 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2008 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2009 } 2010 2011 static void ivb_err_int_handler(struct drm_device *dev) 2012 { 2013 struct drm_i915_private *dev_priv = dev->dev_private; 2014 u32 err_int = I915_READ(GEN7_ERR_INT); 2015 enum i915_pipe pipe; 2016 2017 if (err_int & ERR_INT_POISON) 2018 DRM_ERROR("Poison interrupt\n"); 2019 2020 for_each_pipe(dev_priv, pipe) { 2021 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2022 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2023 2024 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2025 if (IS_IVYBRIDGE(dev)) 2026 ivb_pipe_crc_irq_handler(dev, pipe); 2027 else 2028 hsw_pipe_crc_irq_handler(dev, pipe); 2029 } 2030 } 2031 2032 I915_WRITE(GEN7_ERR_INT, err_int); 2033 } 2034 2035 static void cpt_serr_int_handler(struct drm_device *dev) 2036 { 2037 struct drm_i915_private *dev_priv = dev->dev_private; 2038 u32 serr_int = I915_READ(SERR_INT); 2039 2040 if (serr_int & SERR_INT_POISON) 2041 DRM_ERROR("PCH poison interrupt\n"); 2042 2043 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2044 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2045 2046 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2047 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2048 2049 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2050 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2051 2052 I915_WRITE(SERR_INT, serr_int); 2053 } 2054 2055 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2056 { 2057 struct drm_i915_private *dev_priv = dev->dev_private; 2058 int pipe; 2059 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2060 u32 dig_hotplug_reg; 2061 2062 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2063 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2064 2065 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); 2066 2067 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2068 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2069 SDE_AUDIO_POWER_SHIFT_CPT); 2070 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2071 port_name(port)); 2072 } 2073 2074 if (pch_iir & SDE_AUX_MASK_CPT) 2075 dp_aux_irq_handler(dev); 2076 2077 if (pch_iir & SDE_GMBUS_CPT) 2078 gmbus_irq_handler(dev); 2079 2080 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2081 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2082 2083 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2084 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2085 2086 if (pch_iir & SDE_FDI_MASK_CPT) 2087 for_each_pipe(dev_priv, pipe) 2088 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2089 pipe_name(pipe), 2090 I915_READ(FDI_RX_IIR(pipe))); 2091 2092 if (pch_iir & SDE_ERROR_CPT) 2093 cpt_serr_int_handler(dev); 2094 } 
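/*
 * The two south-bridge handlers above are only ever reached through a
 * two-level demux: the CPU display engine IIR merely reports "a PCH event
 * happened", and the actual cause has to be read out of SDEIIR and decoded
 * bit by bit. A minimal sketch of that pattern, with a hypothetical helper
 * name (the real callers are ilk_display_irq_handler() and
 * ivb_display_irq_handler() below):
 */
#if 0
static void example_pch_demux(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pch_iir = I915_READ(SDEIIR);	/* find the south cause */

	if (HAS_PCH_CPT(dev))
		cpt_irq_handler(dev, pch_iir);	/* decode and process */
	else
		ibx_irq_handler(dev, pch_iir);

	/* clear the PCH event after processing, before the CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
}
#endif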
2095 2096 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2097 { 2098 struct drm_i915_private *dev_priv = dev->dev_private; 2099 enum i915_pipe pipe; 2100 2101 if (de_iir & DE_AUX_CHANNEL_A) 2102 dp_aux_irq_handler(dev); 2103 2104 if (de_iir & DE_GSE) 2105 intel_opregion_asle_intr(dev); 2106 2107 if (de_iir & DE_POISON) 2108 DRM_ERROR("Poison interrupt\n"); 2109 2110 for_each_pipe(dev_priv, pipe) { 2111 if (de_iir & DE_PIPE_VBLANK(pipe) && 2112 intel_pipe_handle_vblank(dev, pipe)) 2113 intel_check_page_flip(dev, pipe); 2114 2115 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2116 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2117 2118 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2119 i9xx_pipe_crc_irq_handler(dev, pipe); 2120 2121 /* plane/pipes map 1:1 on ilk+ */ 2122 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2123 intel_prepare_page_flip(dev, pipe); 2124 intel_finish_page_flip_plane(dev, pipe); 2125 } 2126 } 2127 2128 /* check event from PCH */ 2129 if (de_iir & DE_PCH_EVENT) { 2130 u32 pch_iir = I915_READ(SDEIIR); 2131 2132 if (HAS_PCH_CPT(dev)) 2133 cpt_irq_handler(dev, pch_iir); 2134 else 2135 ibx_irq_handler(dev, pch_iir); 2136 2137 /* should clear PCH hotplug event before clear CPU irq */ 2138 I915_WRITE(SDEIIR, pch_iir); 2139 } 2140 2141 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2142 ironlake_rps_change_irq_handler(dev); 2143 } 2144 2145 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2146 { 2147 struct drm_i915_private *dev_priv = dev->dev_private; 2148 enum i915_pipe pipe; 2149 2150 if (de_iir & DE_ERR_INT_IVB) 2151 ivb_err_int_handler(dev); 2152 2153 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2154 dp_aux_irq_handler(dev); 2155 2156 if (de_iir & DE_GSE_IVB) 2157 intel_opregion_asle_intr(dev); 2158 2159 for_each_pipe(dev_priv, pipe) { 2160 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2161 intel_pipe_handle_vblank(dev, pipe)) 2162 intel_check_page_flip(dev, pipe); 2163 2164 /* plane/pipes map 1:1 on ilk+ */ 2165 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2166 intel_prepare_page_flip(dev, pipe); 2167 intel_finish_page_flip_plane(dev, pipe); 2168 } 2169 } 2170 2171 /* check event from PCH */ 2172 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2173 u32 pch_iir = I915_READ(SDEIIR); 2174 2175 cpt_irq_handler(dev, pch_iir); 2176 2177 /* clear PCH hotplug event before clear CPU irq */ 2178 I915_WRITE(SDEIIR, pch_iir); 2179 } 2180 } 2181 2182 /* 2183 * To handle irqs with the minimum potential races with fresh interrupts, we: 2184 * 1 - Disable Master Interrupt Control. 2185 * 2 - Find the source(s) of the interrupt. 2186 * 3 - Clear the Interrupt Identity bits (IIR). 2187 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2188 * 5 - Re-enable Master Interrupt Control. 2189 */ 2190 static irqreturn_t ironlake_irq_handler(void *arg) 2191 { 2192 struct drm_device *dev = arg; 2193 struct drm_i915_private *dev_priv = dev->dev_private; 2194 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2195 2196 if (!intel_irqs_enabled(dev_priv)) 2197 return IRQ_NONE; 2198 2199 /* We get interrupts on unclaimed registers, so check for this before we 2200 * do any I915_{READ,WRITE}. */ 2201 intel_uncore_check_errors(dev); 2202 2203 /* disable master interrupt before clearing iir */ 2204 de_ier = I915_READ(DEIER); 2205 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2206 POSTING_READ(DEIER); 2207 2208 /* Disable south interrupts. 
We'll only write to SDEIIR once, so further 2209 * interrupts will be stored on its back queue, and then we'll be 2210 * able to process them after we restore SDEIER (as soon as we restore 2211 * it, we'll get an interrupt if SDEIIR still has something to process 2212 * due to its back queue). */ 2213 if (!HAS_PCH_NOP(dev)) { 2214 sde_ier = I915_READ(SDEIER); 2215 I915_WRITE(SDEIER, 0); 2216 POSTING_READ(SDEIER); 2217 } 2218 2219 /* Find, clear, then process each source of interrupt */ 2220 2221 gt_iir = I915_READ(GTIIR); 2222 if (gt_iir) { 2223 I915_WRITE(GTIIR, gt_iir); 2224 if (INTEL_INFO(dev)->gen >= 6) 2225 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2226 else 2227 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2228 } 2229 2230 de_iir = I915_READ(DEIIR); 2231 if (de_iir) { 2232 I915_WRITE(DEIIR, de_iir); 2233 if (INTEL_INFO(dev)->gen >= 7) 2234 ivb_display_irq_handler(dev, de_iir); 2235 else 2236 ilk_display_irq_handler(dev, de_iir); 2237 } 2238 2239 if (INTEL_INFO(dev)->gen >= 6) { 2240 u32 pm_iir = I915_READ(GEN6_PMIIR); 2241 if (pm_iir) { 2242 I915_WRITE(GEN6_PMIIR, pm_iir); 2243 gen6_rps_irq_handler(dev_priv, pm_iir); 2244 } 2245 } 2246 2247 I915_WRITE(DEIER, de_ier); 2248 POSTING_READ(DEIER); 2249 if (!HAS_PCH_NOP(dev)) { 2250 I915_WRITE(SDEIER, sde_ier); 2251 POSTING_READ(SDEIER); 2252 } 2253 return IRQ_HANDLED; 2254 } 2255
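/*
 * gen8_irq_handler() below follows the same five-step recipe documented
 * above ironlake_irq_handler(), with the GEN8_MASTER_IRQ register taking
 * over the role of the DEIER master-enable bit. Reduced to its skeleton
 * (register and helper names are abstracted placeholders, so this is an
 * illustrative sketch rather than driver code):
 */
#if 0
	master = I915_READ(MASTER_IRQ_REG);	/* 1: disable master ctl */
	I915_WRITE(MASTER_IRQ_REG, 0);
	POSTING_READ(MASTER_IRQ_REG);

	iir = I915_READ(SOURCE_IIR_REG);	/* 2: find the source(s) */
	if (iir) {
		I915_WRITE(SOURCE_IIR_REG, iir);	/* 3: clear IIR */
		process(iir);				/* 4: process */
	}

	I915_WRITE(MASTER_IRQ_REG, master);	/* 5: re-enable master ctl */
	POSTING_READ(MASTER_IRQ_REG);
#endif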
2256 static irqreturn_t gen8_irq_handler(void *arg) 2257 { 2258 struct drm_device *dev = arg; 2259 struct drm_i915_private *dev_priv = dev->dev_private; 2260 u32 master_ctl; 2261 uint32_t tmp = 0; 2262 enum i915_pipe pipe; 2263 u32 aux_mask = GEN8_AUX_CHANNEL_A; 2264 2265 if (!intel_irqs_enabled(dev_priv)) 2266 return IRQ_NONE; 2267 2268 if (IS_GEN9(dev)) 2269 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 2270 GEN9_AUX_CHANNEL_D; 2271 2272 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2273 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2274 if (!master_ctl) 2275 return IRQ_NONE; 2276 2277 I915_WRITE(GEN8_MASTER_IRQ, 0); 2278 POSTING_READ(GEN8_MASTER_IRQ); 2279 2280 /* Find, clear, then process each source of interrupt */ 2281 2282 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2283 2284 if (master_ctl & GEN8_DE_MISC_IRQ) { 2285 tmp = I915_READ(GEN8_DE_MISC_IIR); 2286 if (tmp) { 2287 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2288 if (tmp & GEN8_DE_MISC_GSE) 2289 intel_opregion_asle_intr(dev); 2290 else 2291 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2292 } 2293 else 2294 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2295 } 2296 2297 if (master_ctl & GEN8_DE_PORT_IRQ) { 2298 tmp = I915_READ(GEN8_DE_PORT_IIR); 2299 if (tmp) { 2300 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2301 if (tmp & aux_mask) 2302 dp_aux_irq_handler(dev); 2303 else 2304 DRM_ERROR("Unexpected DE Port interrupt\n"); 2305 } 2306 else 2307 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2308 } 2309 2310 for_each_pipe(dev_priv, pipe) { 2311 uint32_t pipe_iir, flip_done = 0, fault_errors = 0; 2312 2313 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2314 continue; 2315 2316 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2317 if (pipe_iir) { 2318 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2319 2320 if (pipe_iir & GEN8_PIPE_VBLANK && 2321 intel_pipe_handle_vblank(dev, pipe)) 2322 intel_check_page_flip(dev, pipe); 2323 2324 if (IS_GEN9(dev)) 2325 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; 2326 else 2327 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; 2328 2329 if (flip_done) { 2330 intel_prepare_page_flip(dev, pipe); 2331 intel_finish_page_flip_plane(dev, pipe); 2332 } 2333 2334 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2335 hsw_pipe_crc_irq_handler(dev, pipe); 2336 2337 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) 2338 intel_cpu_fifo_underrun_irq_handler(dev_priv, 2339 pipe); 2340 2341 2342 if (IS_GEN9(dev)) 2343 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2344 else 2345 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2346 2347 if (fault_errors) 2348 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2349 pipe_name(pipe), 2350 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2351 } else 2352 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2353 } 2354 2355 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2356 /* 2357 * FIXME(BDW): Assume for now that the new interrupt handling 2358 * scheme also closed the SDE interrupt handling race we've seen 2359 * on older pch-split platforms. But this needs testing. 2360 */ 2361 u32 pch_iir = I915_READ(SDEIIR); 2362 if (pch_iir) { 2363 I915_WRITE(SDEIIR, pch_iir); 2364 cpt_irq_handler(dev, pch_iir); 2365 } else 2366 DRM_ERROR("The master control interrupt lied (SDE)!\n"); 2367 2368 } 2369 2370 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2371 POSTING_READ(GEN8_MASTER_IRQ); 2372 return IRQ_HANDLED; 2373 } 2374 2375 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2376 bool reset_completed) 2377 { 2378 struct intel_engine_cs *ring; 2379 int i; 2380 2381 /* 2382 * Notify all waiters for GPU completion events that reset state has 2383 * been changed, and that they need to restart their wait after 2384 * checking for potential errors (and bail out to drop locks if there is 2385 * a gpu reset pending so that i915_error_work_func can acquire them). 2386 */ 2387 2388 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2389 for_each_ring(ring, dev_priv, i) 2390 wake_up_all(&ring->irq_queue); 2391 2392 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2393 wake_up_all(&dev_priv->pending_flip_queue); 2394 2395 /* 2396 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2397 * reset state is cleared. 2398 */ 2399 if (reset_completed) 2400 wake_up_all(&dev_priv->gpu_error.reset_queue); 2401 } 2402 2403 /** 2404 * i915_reset_and_wakeup - do process context error handling work 2405 * 2406 * Fire an error uevent so userspace can see that a hang or error 2407 * was detected. 2408 */ 2409 static void i915_reset_and_wakeup(struct drm_device *dev) 2410 { 2411 struct drm_i915_private *dev_priv = to_i915(dev); 2412 struct i915_gpu_error *error = &dev_priv->gpu_error; 2413 #if 0 2414 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2415 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2416 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2417 #endif 2418 int ret; 2419 2420 #if 0 2421 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2422 #endif 2423 2424 /* 2425 * Note that there's only one work item which does gpu resets, so we 2426 * need not worry about concurrent gpu resets potentially incrementing 2427 * error->reset_counter twice. We only need to take care of another 2428 * racing irq/hangcheck declaring the gpu dead for a second time. A 2429 * quick check for that is good enough: schedule_work ensures the 2430 * correct ordering between hang detection and this work item, and since 2431 * the reset in-progress bit is only ever set by code outside of this 2432 * work we don't need to worry about any other races. 
2433 */ 2434 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2435 DRM_DEBUG_DRIVER("resetting chip\n"); 2436 #if 0 2437 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2438 reset_event); 2439 #endif 2440 2441 /* 2442 * In most cases it's guaranteed that we get here with an RPM 2443 * reference held, for example because there is a pending GPU 2444 * request that won't finish until the reset is done. This 2445 * isn't the case at least when we get here by doing a 2446 * simulated reset via debugfs, so get an RPM reference. 2447 */ 2448 intel_runtime_pm_get(dev_priv); 2449 2450 intel_prepare_reset(dev); 2451 2452 /* 2453 * All state reset _must_ be completed before we update the 2454 * reset counter, for otherwise waiters might miss the reset 2455 * pending state and not properly drop locks, resulting in 2456 * deadlocks with the reset work. 2457 */ 2458 ret = i915_reset(dev); 2459 2460 intel_finish_reset(dev); 2461 2462 intel_runtime_pm_put(dev_priv); 2463 2464 if (ret == 0) { 2465 /* 2466 * After all the gem state is reset, increment the reset 2467 * counter and wake up everyone waiting for the reset to 2468 * complete. 2469 * 2470 * Since unlock operations are a one-sided barrier only, 2471 * we need to insert a barrier here to order any seqno 2472 * updates before 2473 * the counter increment. 2474 */ 2475 smp_mb__before_atomic(); 2476 atomic_inc(&dev_priv->gpu_error.reset_counter); 2477 2478 #if 0 2479 kobject_uevent_env(&dev->primary->kdev->kobj, 2480 KOBJ_CHANGE, reset_done_event); 2481 #endif 2482 } else { 2483 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2484 } 2485 2486 /* 2487 * Note: The wake_up also serves as a memory barrier so that 2488 * waiters see the updated value of the reset counter atomic_t. 2489 */ 2490 i915_error_wake_up(dev_priv, true); 2491 } 2492 }
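/*
 * The reset_counter protocol used by i915_reset_and_wakeup() above and
 * i915_handle_error() below, in brief: i915_handle_error() sets
 * I915_RESET_IN_PROGRESS_FLAG (the low bit, so an odd counter value means
 * a reset is pending), waiters back off, and a successful reset increments
 * the counter again so it returns to even; a failed reset sets the
 * terminal I915_WEDGED bit instead. A sketch of how a waiter samples it,
 * with a hypothetical helper name:
 */
#if 0
static bool example_reset_pending(struct i915_gpu_error *error)
{
	/* odd counter <=> reset in flight; wedged is permanent */
	return i915_reset_in_progress(error) &&
	       !i915_terminally_wedged(error);
}
#endif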
2493 2494 static void i915_report_and_clear_eir(struct drm_device *dev) 2495 { 2496 struct drm_i915_private *dev_priv = dev->dev_private; 2497 uint32_t instdone[I915_NUM_INSTDONE_REG] = { 0 }; /* zeroed since i915_get_extra_instdone() is stubbed out */ 2498 u32 eir = I915_READ(EIR); 2499 int pipe, i; 2500 2501 if (!eir) 2502 return; 2503 2504 pr_err("render error detected, EIR: 0x%08x\n", eir); 2505 2506 #if 0 2507 i915_get_extra_instdone(dev, instdone); 2508 #endif 2509 2510 if (IS_G4X(dev)) { 2511 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2512 u32 ipeir = I915_READ(IPEIR_I965); 2513 2514 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2515 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2516 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2517 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2518 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2519 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2520 I915_WRITE(IPEIR_I965, ipeir); 2521 POSTING_READ(IPEIR_I965); 2522 } 2523 if (eir & GM45_ERROR_PAGE_TABLE) { 2524 u32 pgtbl_err = I915_READ(PGTBL_ER); 2525 pr_err("page table error\n"); 2526 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2527 I915_WRITE(PGTBL_ER, pgtbl_err); 2528 POSTING_READ(PGTBL_ER); 2529 } 2530 } 2531 2532 if (!IS_GEN2(dev)) { 2533 if (eir & I915_ERROR_PAGE_TABLE) { 2534 u32 pgtbl_err = I915_READ(PGTBL_ER); 2535 pr_err("page table error\n"); 2536 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2537 I915_WRITE(PGTBL_ER, pgtbl_err); 2538 POSTING_READ(PGTBL_ER); 2539 } 2540 } 2541 2542 if (eir & I915_ERROR_MEMORY_REFRESH) { 2543 pr_err("memory refresh error:\n"); 2544 for_each_pipe(dev_priv, pipe) 2545 pr_err("pipe %c stat: 0x%08x\n", 2546 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2547 /* pipestat has already been acked */ 2548 } 2549 if (eir & I915_ERROR_INSTRUCTION) { 2550 pr_err("instruction error\n"); 2551 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2552 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2553 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2554 if (INTEL_INFO(dev)->gen < 4) { 2555 u32 ipeir = I915_READ(IPEIR); 2556 2557 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2558 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2559 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2560 I915_WRITE(IPEIR, ipeir); 2561 POSTING_READ(IPEIR); 2562 } else { 2563 u32 ipeir = I915_READ(IPEIR_I965); 2564 2565 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2566 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2567 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2568 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2569 I915_WRITE(IPEIR_I965, ipeir); 2570 POSTING_READ(IPEIR_I965); 2571 } 2572 } 2573 2574 I915_WRITE(EIR, eir); 2575 POSTING_READ(EIR); 2576 eir = I915_READ(EIR); 2577 if (eir) { 2578 /* 2579 * some errors might have become stuck, 2580 * mask them. 2581 */ 2582 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2583 I915_WRITE(EMR, I915_READ(EMR) | eir); 2584 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2585 } 2586 } 2587 2588 /** 2589 * i915_handle_error - handle a gpu error 2590 * @dev: drm device 2591 * 2592 * Do some basic checking of register state at error time and 2593 * dump it to the syslog. Also call i915_capture_error_state() to make 2594 * sure we get a record and make it available in debugfs. Fire a uevent 2595 * so userspace knows something bad happened (should trigger collection 2596 * of a ring dump etc.). 2597 */ 2598 void i915_handle_error(struct drm_device *dev, bool wedged, 2599 const char *fmt, ...) 
2600 { 2601 struct drm_i915_private *dev_priv = dev->dev_private; 2602 #if 0 2603 va_list args; 2604 char error_msg[80]; 2605 2606 va_start(args, fmt); 2607 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2608 va_end(args); 2609 2610 i915_capture_error_state(dev, wedged, error_msg); 2611 #endif 2612 i915_report_and_clear_eir(dev); 2613 2614 if (wedged) { 2615 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2616 &dev_priv->gpu_error.reset_counter); 2617 2618 /* 2619 * Wakeup waiting processes so that the reset function 2620 * i915_reset_and_wakeup doesn't deadlock trying to grab 2621 * various locks. By bumping the reset counter first, the woken 2622 * processes will see a reset in progress and back off, 2623 * releasing their locks and then wait for the reset completion. 2624 * We must do this for _all_ gpu waiters that might hold locks 2625 * that the reset work needs to acquire. 2626 * 2627 * Note: The wake_up serves as the required memory barrier to 2628 * ensure that the waiters see the updated value of the reset 2629 * counter atomic_t. 2630 */ 2631 i915_error_wake_up(dev_priv, false); 2632 } 2633 2634 i915_reset_and_wakeup(dev); 2635 } 2636 2637 /* Called from drm generic code, passed 'crtc' which 2638 * we use as a pipe index 2639 */ 2640 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2641 { 2642 struct drm_i915_private *dev_priv = dev->dev_private; 2643 2644 if (!i915_pipe_enabled(dev, pipe)) 2645 return -EINVAL; 2646 2647 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2648 if (INTEL_INFO(dev)->gen >= 4) 2649 i915_enable_pipestat(dev_priv, pipe, 2650 PIPE_START_VBLANK_INTERRUPT_STATUS); 2651 else 2652 i915_enable_pipestat(dev_priv, pipe, 2653 PIPE_VBLANK_INTERRUPT_STATUS); 2654 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2655 2656 return 0; 2657 } 2658 2659 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2660 { 2661 struct drm_i915_private *dev_priv = dev->dev_private; 2662 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2663 DE_PIPE_VBLANK(pipe); 2664 2665 if (!i915_pipe_enabled(dev, pipe)) 2666 return -EINVAL; 2667 2668 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2669 ironlake_enable_display_irq(dev_priv, bit); 2670 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2671 2672 return 0; 2673 } 2674 2675 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2676 { 2677 struct drm_i915_private *dev_priv = dev->dev_private; 2678 2679 if (!i915_pipe_enabled(dev, pipe)) 2680 return -EINVAL; 2681 2682 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2683 i915_enable_pipestat(dev_priv, pipe, 2684 PIPE_START_VBLANK_INTERRUPT_STATUS); 2685 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2686 2687 return 0; 2688 } 2689 2690 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2691 { 2692 struct drm_i915_private *dev_priv = dev->dev_private; 2693 2694 if (!i915_pipe_enabled(dev, pipe)) 2695 return -EINVAL; 2696 2697 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2698 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2699 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2700 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2701 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2702 return 0; 2703 } 2704 2705 /* Called from drm generic code, passed 'crtc' which 2706 * we use as a pipe index 2707 */ 2708 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2709 { 2710 struct drm_i915_private *dev_priv = dev->dev_private; 2711 2712 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2713 i915_disable_pipestat(dev_priv, pipe, 2714 PIPE_VBLANK_INTERRUPT_STATUS | 2715 PIPE_START_VBLANK_INTERRUPT_STATUS); 2716 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2717 } 2718 2719 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2720 { 2721 struct drm_i915_private *dev_priv = dev->dev_private; 2722 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2723 DE_PIPE_VBLANK(pipe); 2724 2725 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2726 ironlake_disable_display_irq(dev_priv, bit); 2727 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2728 } 2729 2730 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2731 { 2732 struct drm_i915_private *dev_priv = dev->dev_private; 2733 2734 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2735 i915_disable_pipestat(dev_priv, pipe, 2736 PIPE_START_VBLANK_INTERRUPT_STATUS); 2737 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2738 } 2739 2740 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2741 { 2742 struct drm_i915_private *dev_priv = dev->dev_private; 2743 2744 if (!i915_pipe_enabled(dev, pipe)) 2745 return; 2746 2747 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2748 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2749 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2750 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2751 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2752 } 2753 2754 static struct drm_i915_gem_request * 2755 ring_last_request(struct intel_engine_cs *ring) 2756 { 2757 return list_entry(ring->request_list.prev, 2758 struct drm_i915_gem_request, list); 2759 } 2760 2761 static bool 2762 ring_idle(struct intel_engine_cs *ring) 2763 { 2764 return (list_empty(&ring->request_list) || 2765 i915_gem_request_completed(ring_last_request(ring), false)); 2766 } 2767 2768 static bool 2769 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2770 { 2771 if (INTEL_INFO(dev)->gen >= 8) { 2772 return (ipehr >> 23) == 0x1c; 2773 } else { 2774 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2775 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2776 MI_SEMAPHORE_REGISTER); 2777 } 2778 } 2779 2780 static struct intel_engine_cs * 2781 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) 2782 { 2783 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2784 struct intel_engine_cs *signaller; 2785 int i; 2786 2787 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2788 for_each_ring(signaller, dev_priv, i) { 2789 if (ring == signaller) 2790 continue; 2791 2792 if (offset == signaller->semaphore.signal_ggtt[ring->id]) 2793 return signaller; 2794 } 2795 } else { 2796 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2797 2798 for_each_ring(signaller, dev_priv, i) { 2799 if (ring == signaller) 2800 continue; 2801 2802 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2803 return signaller; 2804 } 2805 } 2806 2807 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n", 2808 ring->id, ipehr, offset); 2809 2810 return NULL; 2811 } 2812 2813 static struct intel_engine_cs * 2814 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2815 { 2816 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2817 u32 cmd, ipehr, head; 2818 u64 offset = 0; 2819 int i, backwards; 2820 2821 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2822 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2823 return NULL; 2824 2825 /* 2826 * HEAD is likely pointing to the dword after the actual command, 2827 * so scan backwards until we find the MBOX. But limit it to just 3 2828 * or 4 dwords depending on the semaphore wait command size. 2829 * Note that we don't care about ACTHD here since that might 2830 * point at a batch, and semaphores are always emitted into the 2831 * ringbuffer itself. 2832 */ 2833 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2834 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; 2835 2836 for (i = backwards; i; --i) { 2837 /* 2838 * Be paranoid and presume the hw has gone off into the wild - 2839 * our ring is smaller than what the hardware (and hence 2840 * HEAD_ADDR) allows. Also handles wrap-around. 2841 */ 2842 head &= ring->buffer->size - 1; 2843 2844 /* This here seems to blow up */ 2845 cmd = ioread32(ring->buffer->virtual_start + head); 2846 if (cmd == ipehr) 2847 break; 2848 2849 head -= 4; 2850 } 2851 2852 if (!i) 2853 return NULL; 2854 2855 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2856 if (INTEL_INFO(ring->dev)->gen >= 8) { 2857 offset = ioread32(ring->buffer->virtual_start + head + 12); 2858 offset <<= 32; 2859 offset |= ioread32(ring->buffer->virtual_start + head + 8); 2860 } 2861 return semaphore_wait_to_signaller_ring(ring, ipehr, offset); 2862 } 2863
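/*
 * Ring layout assumed by semaphore_waits_for() above: head ends up
 * pointing at the semaphore wait command dword itself, head+4 holds the
 * seqno to compare against, and on gen8+ head+8/head+12 hold the low/high
 * halves of the 64-bit signal offset, which is why the two reads are
 * combined with a shift and an OR:
 */
#if 0
	offset = ioread32(ring->buffer->virtual_start + head + 12); /* hi */
	offset <<= 32;
	offset |= ioread32(ring->buffer->virtual_start + head + 8); /* lo */
#endif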
2864 static int semaphore_passed(struct intel_engine_cs *ring) 2865 { 2866 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2867 struct intel_engine_cs *signaller; 2868 u32 seqno; 2869 2870 ring->hangcheck.deadlock++; 2871 2872 signaller = semaphore_waits_for(ring, &seqno); 2873 if (signaller == NULL) 2874 return -1; 2875 2876 /* Prevent pathological recursion due to driver bugs */ 2877 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2878 return -1; 2879 2880 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2881 return 1; 2882 2883 /* cursory check for an unkickable deadlock */ 2884 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2885 semaphore_passed(signaller) < 0) 2886 return -1; 2887 2888 return 0; 2889 } 2890 2891 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2892 { 2893 struct intel_engine_cs *ring; 2894 int i; 2895 2896 for_each_ring(ring, dev_priv, i) 2897 ring->hangcheck.deadlock = 0; 2898 } 2899 2900 static enum intel_ring_hangcheck_action 2901 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2902 { 2903 struct drm_device *dev = ring->dev; 2904 struct drm_i915_private *dev_priv = dev->dev_private; 2905 u32 tmp; 2906 2907 if (acthd != ring->hangcheck.acthd) { 2908 if (acthd > ring->hangcheck.max_acthd) { 2909 ring->hangcheck.max_acthd = acthd; 2910 return HANGCHECK_ACTIVE; 2911 } 2912 2913 return HANGCHECK_ACTIVE_LOOP; 2914 } 2915 2916 if (IS_GEN2(dev)) 2917 return HANGCHECK_HUNG; 2918 2919 /* Is the chip hanging on a WAIT_FOR_EVENT? 2920 * If so we can simply poke the RB_WAIT bit 2921 * and break the hang. This should work on 2922 * all but the second generation chipsets. 2923 */ 2924 tmp = I915_READ_CTL(ring); 2925 if (tmp & RING_WAIT) { 2926 i915_handle_error(dev, false, 2927 "Kicking stuck wait on %s", 2928 ring->name); 2929 I915_WRITE_CTL(ring, tmp); 2930 return HANGCHECK_KICK; 2931 } 2932 2933 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2934 switch (semaphore_passed(ring)) { 2935 default: 2936 return HANGCHECK_HUNG; 2937 case 1: 2938 i915_handle_error(dev, false, 2939 "Kicking stuck semaphore on %s", 2940 ring->name); 2941 I915_WRITE_CTL(ring, tmp); 2942 return HANGCHECK_KICK; 2943 case 0: 2944 return HANGCHECK_WAIT; 2945 } 2946 } 2947 2948 return HANGCHECK_HUNG; 2949 } 2950 2951 /* 2952 * This is called when the chip hasn't reported back with completed 2953 * batchbuffers in a long time. We keep track of per-ring seqno progress and 2954 * if there is no progress, the hangcheck score for that ring is increased. 2955 * Further, acthd is inspected to see if the ring is stuck. In the stuck case 2956 * we kick the ring. 
If we see no progress on three subsequent calls 2957 * we assume the chip is wedged and try to fix it by resetting the chip. 2958 */ 2959 static void i915_hangcheck_elapsed(struct work_struct *work) 2960 { 2961 struct drm_i915_private *dev_priv = 2962 container_of(work, typeof(*dev_priv), 2963 gpu_error.hangcheck_work.work); 2964 struct drm_device *dev = dev_priv->dev; 2965 struct intel_engine_cs *ring; 2966 int i; 2967 int busy_count = 0, rings_hung = 0; 2968 bool stuck[I915_NUM_RINGS] = { 0 }; 2969 #define BUSY 1 2970 #define KICK 5 2971 #define HUNG 20 2972 2973 if (!i915.enable_hangcheck) 2974 return; 2975 2976 for_each_ring(ring, dev_priv, i) { 2977 u64 acthd; 2978 u32 seqno; 2979 bool busy = true; 2980 2981 semaphore_clear_deadlocks(dev_priv); 2982 2983 seqno = ring->get_seqno(ring, false); 2984 acthd = intel_ring_get_active_head(ring); 2985 2986 if (ring->hangcheck.seqno == seqno) { 2987 if (ring_idle(ring)) { 2988 ring->hangcheck.action = HANGCHECK_IDLE; 2989 2990 if (waitqueue_active(&ring->irq_queue)) { 2991 /* Issue a wake-up to catch stuck h/w. */ 2992 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2993 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2994 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2995 ring->name); 2996 else 2997 DRM_INFO("Fake missed irq on %s\n", 2998 ring->name); 2999 wake_up_all(&ring->irq_queue); 3000 } 3001 /* Safeguard against driver failure */ 3002 ring->hangcheck.score += BUSY; 3003 } else 3004 busy = false; 3005 } else { 3006 /* We always increment the hangcheck score 3007 * if the ring is busy and still processing 3008 * the same request, so that no single request 3009 * can run indefinitely (such as a chain of 3010 * batches). The only time we do not increment 3011 * the hangcheck score on this ring is if this 3012 * ring is in a legitimate wait for another 3013 * ring. In that case the waiting ring is a 3014 * victim and we want to be sure we catch the 3015 * right culprit. Then every time we do kick 3016 * the ring, add a small increment to the 3017 * score so that we can catch a batch that is 3018 * being repeatedly kicked and so responsible 3019 * for stalling the machine. 3020 */ 3021 ring->hangcheck.action = ring_stuck(ring, 3022 acthd); 3023 3024 switch (ring->hangcheck.action) { 3025 case HANGCHECK_IDLE: 3026 case HANGCHECK_WAIT: 3027 case HANGCHECK_ACTIVE: 3028 break; 3029 case HANGCHECK_ACTIVE_LOOP: 3030 ring->hangcheck.score += BUSY; 3031 break; 3032 case HANGCHECK_KICK: 3033 ring->hangcheck.score += KICK; 3034 break; 3035 case HANGCHECK_HUNG: 3036 ring->hangcheck.score += HUNG; 3037 stuck[i] = true; 3038 break; 3039 } 3040 } 3041 } else { 3042 ring->hangcheck.action = HANGCHECK_ACTIVE; 3043 3044 /* Gradually reduce the count so that we catch DoS 3045 * attempts across multiple batches. 3046 */ 3047 if (ring->hangcheck.score > 0) 3048 ring->hangcheck.score--; 3049 3050 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; 3051 } 3052 3053 ring->hangcheck.seqno = seqno; 3054 ring->hangcheck.acthd = acthd; 3055 busy_count += busy; 3056 } 3057 3058 for_each_ring(ring, dev_priv, i) { 3059 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3060 DRM_INFO("%s on %s\n", 3061 stuck[i] ? "stuck" : "no progress", 3062 ring->name); 3063 rings_hung++; 3064 } 3065 } 3066 3067 if (rings_hung) 3068 return i915_handle_error(dev, true, "Ring hung"); 3069 3070 if (busy_count) 3071 /* Reset the timer in case the chip hangs without another request 3072 * being added */ 3073 i915_queue_hangcheck(dev); 3074 } 3075
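/*
 * Score bookkeeping in i915_hangcheck_elapsed() above, by way of a worked
 * example: a busy ring that makes no seqno progress gains BUSY (1) per
 * tick, a wait or semaphore that had to be kicked gains KICK (5), and a
 * genuinely hung ring gains HUNG (20), while any tick with progress decays
 * the score by one. Assuming HANGCHECK_SCORE_RING_HUNG is at its usual
 * value of 31, a ring kicked on consecutive ticks (5 + 5 + 5 + ...)
 * crosses the threshold after about seven ticks, long before a merely
 * busy ring (1 + 1 + 1 + ...) would, so a repeatedly-kicked batch is
 * blamed well ahead of an honest long-running one.
 */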
"stuck" : "no progress", 3062 ring->name); 3063 rings_hung++; 3064 } 3065 } 3066 3067 if (rings_hung) 3068 return i915_handle_error(dev, true, "Ring hung"); 3069 3070 if (busy_count) 3071 /* Reset timer case chip hangs without another request 3072 * being added */ 3073 i915_queue_hangcheck(dev); 3074 } 3075 3076 void i915_queue_hangcheck(struct drm_device *dev) 3077 { 3078 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3079 3080 if (!i915.enable_hangcheck) 3081 return; 3082 3083 /* Don't continually defer the hangcheck so that it is always run at 3084 * least once after work has been scheduled on any ring. Otherwise, 3085 * we will ignore a hung ring if a second ring is kept busy. 3086 */ 3087 3088 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 3089 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 3090 } 3091 3092 static void ibx_irq_reset(struct drm_device *dev) 3093 { 3094 struct drm_i915_private *dev_priv = dev->dev_private; 3095 3096 if (HAS_PCH_NOP(dev)) 3097 return; 3098 3099 GEN5_IRQ_RESET(SDE); 3100 3101 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3102 I915_WRITE(SERR_INT, 0xffffffff); 3103 } 3104 3105 /* 3106 * SDEIER is also touched by the interrupt handler to work around missed PCH 3107 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3108 * instead we unconditionally enable all PCH interrupt sources here, but then 3109 * only unmask them as needed with SDEIMR. 3110 * 3111 * This function needs to be called before interrupts are enabled. 3112 */ 3113 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3114 { 3115 struct drm_i915_private *dev_priv = dev->dev_private; 3116 3117 if (HAS_PCH_NOP(dev)) 3118 return; 3119 3120 WARN_ON(I915_READ(SDEIER) != 0); 3121 I915_WRITE(SDEIER, 0xffffffff); 3122 POSTING_READ(SDEIER); 3123 } 3124 3125 static void gen5_gt_irq_reset(struct drm_device *dev) 3126 { 3127 struct drm_i915_private *dev_priv = dev->dev_private; 3128 3129 GEN5_IRQ_RESET(GT); 3130 if (INTEL_INFO(dev)->gen >= 6) 3131 GEN5_IRQ_RESET(GEN6_PM); 3132 } 3133 3134 /* drm_dma.h hooks 3135 */ 3136 static void ironlake_irq_reset(struct drm_device *dev) 3137 { 3138 struct drm_i915_private *dev_priv = dev->dev_private; 3139 3140 I915_WRITE(HWSTAM, 0xffffffff); 3141 3142 GEN5_IRQ_RESET(DE); 3143 if (IS_GEN7(dev)) 3144 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3145 3146 gen5_gt_irq_reset(dev); 3147 3148 ibx_irq_reset(dev); 3149 } 3150 3151 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3152 { 3153 enum i915_pipe pipe; 3154 3155 I915_WRITE(PORT_HOTPLUG_EN, 0); 3156 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3157 3158 for_each_pipe(dev_priv, pipe) 3159 I915_WRITE(PIPESTAT(pipe), 0xffff); 3160 3161 GEN5_IRQ_RESET(VLV_); 3162 } 3163 3164 static void valleyview_irq_preinstall(struct drm_device *dev) 3165 { 3166 struct drm_i915_private *dev_priv = dev->dev_private; 3167 3168 /* VLV magic */ 3169 I915_WRITE(VLV_IMR, 0); 3170 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3171 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3172 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3173 3174 gen5_gt_irq_reset(dev); 3175 3176 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3177 3178 vlv_display_irq_reset(dev_priv); 3179 } 3180 3181 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3182 { 3183 GEN8_IRQ_RESET_NDX(GT, 0); 3184 GEN8_IRQ_RESET_NDX(GT, 1); 3185 GEN8_IRQ_RESET_NDX(GT, 2); 3186 GEN8_IRQ_RESET_NDX(GT, 3); 3187 } 3188 3189 static void gen8_irq_reset(struct drm_device *dev) 3190 { 3191 struct drm_i915_private 
*dev_priv = dev->dev_private; 3192 int pipe; 3193 3194 I915_WRITE(GEN8_MASTER_IRQ, 0); 3195 POSTING_READ(GEN8_MASTER_IRQ); 3196 3197 gen8_gt_irq_reset(dev_priv); 3198 3199 for_each_pipe(dev_priv, pipe) 3200 if (intel_display_power_is_enabled(dev_priv, 3201 POWER_DOMAIN_PIPE(pipe))) 3202 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3203 3204 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3205 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3206 GEN5_IRQ_RESET(GEN8_PCU_); 3207 3208 ibx_irq_reset(dev); 3209 } 3210 3211 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3212 { 3213 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3214 3215 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3216 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3217 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3218 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3219 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3220 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3221 } 3222 3223 static void cherryview_irq_preinstall(struct drm_device *dev) 3224 { 3225 struct drm_i915_private *dev_priv = dev->dev_private; 3226 3227 I915_WRITE(GEN8_MASTER_IRQ, 0); 3228 POSTING_READ(GEN8_MASTER_IRQ); 3229 3230 gen8_gt_irq_reset(dev_priv); 3231 3232 GEN5_IRQ_RESET(GEN8_PCU_); 3233 3234 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3235 3236 vlv_display_irq_reset(dev_priv); 3237 } 3238 3239 static void ibx_hpd_irq_setup(struct drm_device *dev) 3240 { 3241 struct drm_i915_private *dev_priv = dev->dev_private; 3242 struct intel_encoder *intel_encoder; 3243 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3244 3245 if (HAS_PCH_IBX(dev)) { 3246 hotplug_irqs = SDE_HOTPLUG_MASK; 3247 for_each_intel_encoder(dev, intel_encoder) 3248 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3249 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3250 } else { 3251 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3252 for_each_intel_encoder(dev, intel_encoder) 3253 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3254 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3255 } 3256 3257 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3258 3259 /* 3260 * Enable digital hotplug on the PCH, and configure the DP short pulse 3261 * duration to 2ms (which is the minimum in the Display Port spec) 3262 * 3263 * This register is the same on all known PCH chips. 3264 */ 3265 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3266 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3267 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3268 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3269 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3270 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3271 } 3272 3273 static void ibx_irq_postinstall(struct drm_device *dev) 3274 { 3275 struct drm_i915_private *dev_priv = dev->dev_private; 3276 u32 mask; 3277 3278 if (HAS_PCH_NOP(dev)) 3279 return; 3280 3281 if (HAS_PCH_IBX(dev)) 3282 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3283 else 3284 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3285 3286 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3287 I915_WRITE(SDEIMR, ~mask); 3288 } 3289 3290 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3291 { 3292 struct drm_i915_private *dev_priv = dev->dev_private; 3293 u32 pm_irqs, gt_irqs; 3294 3295 pm_irqs = gt_irqs = 0; 3296 3297 dev_priv->gt_irq_mask = ~0; 3298 if (HAS_L3_DPF(dev)) { 3299 /* L3 parity interrupt is always unmasked. 
*/ 3300 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3301 gt_irqs |= GT_PARITY_ERROR(dev); 3302 } 3303 3304 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3305 if (IS_GEN5(dev)) { 3306 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3307 ILK_BSD_USER_INTERRUPT; 3308 } else { 3309 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3310 } 3311 3312 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3313 3314 if (INTEL_INFO(dev)->gen >= 6) { 3315 /* 3316 * RPS interrupts will get enabled/disabled on demand when RPS 3317 * itself is enabled/disabled. 3318 */ 3319 if (HAS_VEBOX(dev)) 3320 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3321 3322 dev_priv->pm_irq_mask = 0xffffffff; 3323 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3324 } 3325 } 3326 3327 static int ironlake_irq_postinstall(struct drm_device *dev) 3328 { 3329 struct drm_i915_private *dev_priv = dev->dev_private; 3330 u32 display_mask, extra_mask; 3331 3332 if (INTEL_INFO(dev)->gen >= 7) { 3333 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3334 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3335 DE_PLANEB_FLIP_DONE_IVB | 3336 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3337 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3338 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3339 } else { 3340 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3341 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3342 DE_AUX_CHANNEL_A | 3343 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3344 DE_POISON); 3345 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3346 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3347 } 3348 3349 dev_priv->irq_mask = ~display_mask; 3350 3351 I915_WRITE(HWSTAM, 0xeffe); 3352 3353 ibx_irq_pre_postinstall(dev); 3354 3355 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3356 3357 gen5_gt_irq_postinstall(dev); 3358 3359 ibx_irq_postinstall(dev); 3360 3361 if (IS_IRONLAKE_M(dev)) { 3362 /* Enable PCU event interrupts 3363 * 3364 * spinlocking not required here for correctness since interrupt 3365 * setup is guaranteed to run in single-threaded context. But we 3366 * need it to make the assert_spin_locked happy. 
*/ 3367 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3368 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3369 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3370 } 3371 3372 return 0; 3373 } 3374 3375 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3376 { 3377 u32 pipestat_mask; 3378 u32 iir_mask; 3379 enum i915_pipe pipe; 3380 3381 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3382 PIPE_FIFO_UNDERRUN_STATUS; 3383 3384 for_each_pipe(dev_priv, pipe) 3385 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3386 POSTING_READ(PIPESTAT(PIPE_A)); 3387 3388 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3389 PIPE_CRC_DONE_INTERRUPT_STATUS; 3390 3391 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3392 for_each_pipe(dev_priv, pipe) 3393 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3394 3395 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3396 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3397 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3398 if (IS_CHERRYVIEW(dev_priv)) 3399 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3400 dev_priv->irq_mask &= ~iir_mask; 3401 3402 I915_WRITE(VLV_IIR, iir_mask); 3403 I915_WRITE(VLV_IIR, iir_mask); 3404 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3405 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3406 POSTING_READ(VLV_IMR); 3407 } 3408 3409 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3410 { 3411 u32 pipestat_mask; 3412 u32 iir_mask; 3413 enum i915_pipe pipe; 3414 3415 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3416 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3417 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3418 if (IS_CHERRYVIEW(dev_priv)) 3419 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3420 3421 dev_priv->irq_mask |= iir_mask; 3422 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3423 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3424 I915_WRITE(VLV_IIR, iir_mask); 3425 I915_WRITE(VLV_IIR, iir_mask); 3426 POSTING_READ(VLV_IIR); 3427 3428 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3429 PIPE_CRC_DONE_INTERRUPT_STATUS; 3430 3431 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3432 for_each_pipe(dev_priv, pipe) 3433 i915_disable_pipestat(dev_priv, pipe, pipestat_mask); 3434 3435 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3436 PIPE_FIFO_UNDERRUN_STATUS; 3437 3438 for_each_pipe(dev_priv, pipe) 3439 I915_WRITE(PIPESTAT(pipe), pipestat_mask); 3440 POSTING_READ(PIPESTAT(PIPE_A)); 3441 } 3442 3443 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3444 { 3445 assert_spin_locked(&dev_priv->irq_lock); 3446 3447 if (dev_priv->display_irqs_enabled) 3448 return; 3449 3450 dev_priv->display_irqs_enabled = true; 3451 3452 if (intel_irqs_enabled(dev_priv)) 3453 valleyview_display_irqs_install(dev_priv); 3454 } 3455 3456 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3457 { 3458 assert_spin_locked(&dev_priv->irq_lock); 3459 3460 if (!dev_priv->display_irqs_enabled) 3461 return; 3462 3463 dev_priv->display_irqs_enabled = false; 3464 3465 if (intel_irqs_enabled(dev_priv)) 3466 valleyview_display_irqs_uninstall(dev_priv); 3467 } 3468 3469 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3470 { 3471 dev_priv->irq_mask = ~0; 3472 3473 I915_WRITE(PORT_HOTPLUG_EN, 0); 3474 POSTING_READ(PORT_HOTPLUG_EN); 3475 3476 I915_WRITE(VLV_IIR, 0xffffffff); 3477 I915_WRITE(VLV_IIR, 0xffffffff); 3478 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3479 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3480 POSTING_READ(VLV_IMR); 3481 3482 /* Interrupt setup is already guaranteed 
to be single-threaded, this is 3483 * just to make the assert_spin_locked check happy. */ 3484 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3485 if (dev_priv->display_irqs_enabled) 3486 valleyview_display_irqs_install(dev_priv); 3487 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3488 } 3489 3490 static int valleyview_irq_postinstall(struct drm_device *dev) 3491 { 3492 struct drm_i915_private *dev_priv = dev->dev_private; 3493 3494 vlv_display_irq_postinstall(dev_priv); 3495 3496 gen5_gt_irq_postinstall(dev); 3497 3498 /* ack & enable invalid PTE error interrupts */ 3499 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3500 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3501 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3502 #endif 3503 3504 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3505 3506 return 0; 3507 } 3508 3509 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3510 { 3511 /* These are interrupts we'll toggle with the ring mask register */ 3512 uint32_t gt_interrupts[] = { 3513 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3514 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3515 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3516 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3517 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3518 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3519 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3520 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3521 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3522 0, 3523 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3524 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3525 }; 3526 3527 dev_priv->pm_irq_mask = 0xffffffff; 3528 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3529 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3530 /* 3531 * RPS interrupts will get enabled/disabled on demand when RPS itself 3532 * is enabled/disabled. 
3533 */ 3534 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3535 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3536 } 3537 3538 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3539 { 3540 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3541 uint32_t de_pipe_enables; 3542 int pipe; 3543 u32 aux_en = GEN8_AUX_CHANNEL_A; 3544 3545 if (IS_GEN9(dev_priv)) { 3546 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3547 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3548 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3549 GEN9_AUX_CHANNEL_D; 3550 } else 3551 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3552 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3553 3554 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3555 GEN8_PIPE_FIFO_UNDERRUN; 3556 3557 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3558 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3559 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3560 3561 for_each_pipe(dev_priv, pipe) 3562 if (intel_display_power_is_enabled(dev_priv, 3563 POWER_DOMAIN_PIPE(pipe))) 3564 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3565 dev_priv->de_irq_mask[pipe], 3566 de_pipe_enables); 3567 3568 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); 3569 } 3570 3571 static int gen8_irq_postinstall(struct drm_device *dev) 3572 { 3573 struct drm_i915_private *dev_priv = dev->dev_private; 3574 3575 ibx_irq_pre_postinstall(dev); 3576 3577 gen8_gt_irq_postinstall(dev_priv); 3578 gen8_de_irq_postinstall(dev_priv); 3579 3580 ibx_irq_postinstall(dev); 3581 3582 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3583 POSTING_READ(GEN8_MASTER_IRQ); 3584 3585 return 0; 3586 } 3587 3588 static int cherryview_irq_postinstall(struct drm_device *dev) 3589 { 3590 struct drm_i915_private *dev_priv = dev->dev_private; 3591 3592 vlv_display_irq_postinstall(dev_priv); 3593 3594 gen8_gt_irq_postinstall(dev_priv); 3595 3596 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3597 POSTING_READ(GEN8_MASTER_IRQ); 3598 3599 return 0; 3600 } 3601 3602 static void gen8_irq_uninstall(struct drm_device *dev) 3603 { 3604 struct drm_i915_private *dev_priv = dev->dev_private; 3605 3606 if (!dev_priv) 3607 return; 3608 3609 gen8_irq_reset(dev); 3610 } 3611 3612 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) 3613 { 3614 /* Interrupt setup is already guaranteed to be single-threaded, this is 3615 * just to make the assert_spin_locked check happy. 
*/ 3616 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3617 if (dev_priv->display_irqs_enabled) 3618 valleyview_display_irqs_uninstall(dev_priv); 3619 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3620 3621 vlv_display_irq_reset(dev_priv); 3622 3623 dev_priv->irq_mask = ~0; 3624 } 3625 3626 static void valleyview_irq_uninstall(struct drm_device *dev) 3627 { 3628 struct drm_i915_private *dev_priv = dev->dev_private; 3629 3630 if (!dev_priv) 3631 return; 3632 3633 I915_WRITE(VLV_MASTER_IER, 0); 3634 3635 gen5_gt_irq_reset(dev); 3636 3637 I915_WRITE(HWSTAM, 0xffffffff); 3638 3639 vlv_display_irq_uninstall(dev_priv); 3640 } 3641 3642 static void cherryview_irq_uninstall(struct drm_device *dev) 3643 { 3644 struct drm_i915_private *dev_priv = dev->dev_private; 3645 3646 if (!dev_priv) 3647 return; 3648 3649 I915_WRITE(GEN8_MASTER_IRQ, 0); 3650 POSTING_READ(GEN8_MASTER_IRQ); 3651 3652 gen8_gt_irq_reset(dev_priv); 3653 3654 GEN5_IRQ_RESET(GEN8_PCU_); 3655 3656 vlv_display_irq_uninstall(dev_priv); 3657 } 3658 3659 static void ironlake_irq_uninstall(struct drm_device *dev) 3660 { 3661 struct drm_i915_private *dev_priv = dev->dev_private; 3662 3663 if (!dev_priv) 3664 return; 3665 3666 ironlake_irq_reset(dev); 3667 } 3668 3669 static void i8xx_irq_preinstall(struct drm_device * dev) 3670 { 3671 struct drm_i915_private *dev_priv = dev->dev_private; 3672 int pipe; 3673 3674 for_each_pipe(dev_priv, pipe) 3675 I915_WRITE(PIPESTAT(pipe), 0); 3676 I915_WRITE16(IMR, 0xffff); 3677 I915_WRITE16(IER, 0x0); 3678 POSTING_READ16(IER); 3679 } 3680 3681 static int i8xx_irq_postinstall(struct drm_device *dev) 3682 { 3683 struct drm_i915_private *dev_priv = dev->dev_private; 3684 3685 I915_WRITE16(EMR, 3686 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3687 3688 /* Unmask the interrupts that we always want on. */ 3689 dev_priv->irq_mask = 3690 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3691 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3692 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3693 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3694 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3695 I915_WRITE16(IMR, dev_priv->irq_mask); 3696 3697 I915_WRITE16(IER, 3698 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3699 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3700 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3701 I915_USER_INTERRUPT); 3702 POSTING_READ16(IER); 3703 3704 /* Interrupt setup is already guaranteed to be single-threaded, this is 3705 * just to make the assert_spin_locked check happy. */ 3706 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3707 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3708 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3709 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3710 3711 return 0; 3712 } 3713 3714 /* 3715 * Returns true when a page flip has completed. 3716 */ 3717 static bool i8xx_handle_vblank(struct drm_device *dev, 3718 int plane, int pipe, u32 iir) 3719 { 3720 struct drm_i915_private *dev_priv = dev->dev_private; 3721 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3722 3723 if (!intel_pipe_handle_vblank(dev, pipe)) 3724 return false; 3725 3726 if ((iir & flip_pending) == 0) 3727 goto check_page_flip; 3728 3729 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3730 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3731 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3732 * the flip is completed (no longer pending). 
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}

static irqreturn_t i8xx_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
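/*
 * Editor's summary of the flip-pending dance in i8xx_handle_vblank()
 * above (and its i915 twin below), as a small state table derived from
 * the in-code comment; a sketch of the logic, not of the hardware:
 *
 *	IIR pending	ISR pending	meaning at vblank
 *	    0		    -		no MI_DISPLAY_FLIP latched, poll anyway
 *	    1		    1		flip still pending, check again later
 *	    1		    0		flip completed, finish the page flip
 *
 * IIR latches the PendingFlip edge from MI_DISPLAY_FLIP while ISR shows
 * the live status, so "latched but no longer live" means FlipDone.
 */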
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);
	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	irqreturn_t ret = IRQ_NONE;
	int pipe;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);

	return ret;
}
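/*
 * Editor's sketch of the iir/new_iir shuffle above, which implements the
 * MSI re-arm rule spelled out in the in-loop comment: MSI only fires on a
 * 0 -> nonzero transition of IIR, so the handler keeps looping until IIR
 * reads back as quiescent. Minimal form of the invariant (flip_mask
 * handling elided for clarity):
 *
 *	iir = I915_READ(IIR);
 *	while (iir) {
 *		I915_WRITE(IIR, iir);		// ack what we saw
 *		new_iir = I915_READ(IIR);	// anything that raced in?
 *		// ... service the bits recorded in iir ...
 *		iir = new_iir;			// loop until quiescent
 *	}
 */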
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
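/*
 * Editor's note on the mask derivation in i965_irq_postinstall() above
 * (a restatement of the code, not new policy): IER is derived from IMR
 * instead of spelling the bits out twice. Everything unmasked in IMR is
 * enabled in IER, except the plane flip-pending bits, which stay out of
 * the initial IER value; flip completion is instead detected from
 * IIR/ISR at vblank time (see i915_handle_vblank). The user-interrupt
 * bits are IER-only additions. Sketch of the derivation:
 *
 *	enable_mask = ~dev_priv->irq_mask;	// all unmasked bits
 *	enable_mask &= ~FLIP_PENDING_BITS;	// flips observed at vblank
 *	enable_mask |= I915_USER_INTERRUPT;	// ring notifications
 */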
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	hotplug_en = I915_READ(PORT_HOTPLUG_EN);
	hotplug_en &= ~HOTPLUG_INT_EN_MASK;
	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	for_each_intel_encoder(dev, intel_encoder)
		if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
			hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
	/*
	 * Programming the CRT detection parameters tends to generate a
	 * spurious hotplug event about three seconds later. So just do
	 * it once.
	 */
	if (IS_G4X(dev))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
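/*
 * Editor's note: i915_hpd_irq_setup() above is a read-modify-write of
 * PORT_HOTPLUG_EN. All per-pin enable bits are cleared first and only
 * the pins currently marked HPD_ENABLED are turned back on, which is
 * how storm-disabled pins stay off until the delayed reenable work
 * (below) flips them back. The CRT detection timing is folded into the
 * same single write since reprogramming it separately tends to fire a
 * spurious hotplug event a few seconds later, per the comment above.
 */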
static irqreturn_t i965_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	irqreturn_t ret = IRQ_NONE;
	int pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

static void i965_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
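/*
 * Editor's note: teardown across the uninstall hooks in this file follows
 * the same rough recipe (a sketch; the per-generation hooks vary in
 * register width and pipe handling): mask everything, stop delivery, then
 * ack whatever status is still latched so a later install starts from a
 * clean IIR/PIPESTAT state:
 *
 *	I915_WRITE(IMR, 0xffffffff);		// mask all sources
 *	I915_WRITE(IER, 0);			// stop delivery
 *	I915_WRITE(IIR, I915_READ(IIR));	// write-1-to-clear leftovers
 */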
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	intel_runtime_pm_put(dev_priv);
}
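/*
 * Editor's note: intel_hpd_irq_reenable_work() above is the tail end of
 * the hotplug storm handling. Pins that were demoted to polling stay
 * HPD_DISABLED until this delayed work flips them back to HPD_ENABLED,
 * restores connector->polled from the connector's preferred mode, and
 * reprograms the hardware via the hpd_irq_setup hook. The runtime-pm
 * get/put wrapping is needed because the hook touches interrupt
 * registers that are only accessible while the device is awake.
 */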
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not setup the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
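/*
 * Editor's note on the vblank counter selection in intel_irq_init()
 * above (summarizing the code, not adding new policy): gen2 has no
 * hardware frame counter, so max_vblank_count is 0 and vblank sequence
 * numbers depend entirely on vblank interrupts; g4x and gen5+ expose a
 * full 32 bit counter; everything in between only provides 24 bits, so
 * the DRM core must handle wraparound at 0xffffff.
 */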
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
#if 0
	synchronize_irq(dev_priv->dev->irq);
#endif
}
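/*
 * Editor's note: runtime-pm interrupt toggling reuses the driver's own
 * irq vtable rather than drm_irq_install/uninstall. A disable is just
 * the irq_uninstall hook, and the re-enable path below replays
 * preinstall + postinstall, e.g. (a sketch of the call flow):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	// ... device suspended, interrupt registers powered off ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * pm.irqs_enabled is flipped around these calls so the assert-style
 * checks in the mask/unmask helpers can catch register access while
 * interrupts are supposed to be off.
 */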
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}