1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define KBUILD_MODNAME "i915" 30 31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 32 33 #include <linux/sysrq.h> 34 #include <linux/circ_buf.h> 35 #include <drm/drmP.h> 36 #include <drm/i915_drm.h> 37 #include "i915_drv.h" 38 #include "i915_trace.h" 39 #include "intel_drv.h" 40 41 /** 42 * DOC: interrupt handling 43 * 44 * These functions provide the basic support for enabling and disabling the 45 * interrupt handling support. There's a lot more functionality in i915_irq.c 46 * and related files, but that will be described in separate chapters. 47 */ 48 49 static const u32 hpd_ilk[HPD_NUM_PINS] = { 50 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 51 }; 52 53 static const u32 hpd_ivb[HPD_NUM_PINS] = { 54 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 55 }; 56 57 static const u32 hpd_bdw[HPD_NUM_PINS] = { 58 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 59 }; 60 61 static const u32 hpd_ibx[HPD_NUM_PINS] = { 62 [HPD_CRT] = SDE_CRT_HOTPLUG, 63 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 64 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 65 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 66 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 67 }; 68 69 static const u32 hpd_cpt[HPD_NUM_PINS] = { 70 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 71 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 72 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 73 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 74 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 75 }; 76 77 static const u32 hpd_spt[HPD_NUM_PINS] = { 78 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 79 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 80 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 81 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 82 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 83 }; 84 85 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 86 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 87 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 88 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 89 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 90 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 91 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 92 }; 93 94 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 95 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 96 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 97 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 98 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 99 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 100 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 101 }; 102 103 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 104 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 105 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 106 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 107 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 108 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 109 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 110 }; 111 112 /* BXT hpd list */ 113 static const u32 hpd_bxt[HPD_NUM_PINS] = { 114 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 115 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 116 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 117 }; 118 119 /* IIR can theoretically queue up two events. Be paranoid. */ 120 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 121 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 122 POSTING_READ(GEN8_##type##_IMR(which)); \ 123 I915_WRITE(GEN8_##type##_IER(which), 0); \ 124 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 125 POSTING_READ(GEN8_##type##_IIR(which)); \ 126 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 127 POSTING_READ(GEN8_##type##_IIR(which)); \ 128 } while (0) 129 130 #define GEN5_IRQ_RESET(type) do { \ 131 I915_WRITE(type##IMR, 0xffffffff); \ 132 POSTING_READ(type##IMR); \ 133 I915_WRITE(type##IER, 0); \ 134 I915_WRITE(type##IIR, 0xffffffff); \ 135 POSTING_READ(type##IIR); \ 136 I915_WRITE(type##IIR, 0xffffffff); \ 137 POSTING_READ(type##IIR); \ 138 } while (0) 139 140 /* 141 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 142 */ 143 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, 144 i915_reg_t reg) 145 { 146 u32 val = I915_READ(reg); 147 148 if (val == 0) 149 return; 150 151 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", 152 i915_mmio_reg_offset(reg), val); 153 I915_WRITE(reg, 0xffffffff); 154 POSTING_READ(reg); 155 I915_WRITE(reg, 0xffffffff); 156 POSTING_READ(reg); 157 } 158 159 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 160 gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \ 161 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 162 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \ 163 POSTING_READ(GEN8_##type##_IMR(which)); \ 164 } while (0) 165 166 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 167 gen5_assert_iir_is_zero(dev_priv, type##IIR); \ 168 I915_WRITE(type##IER, (ier_val)); \ 169 I915_WRITE(type##IMR, (imr_val)); \ 170 POSTING_READ(type##IMR); \ 171 } while (0) 172 173 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); 174 175 /* For display hotplug interrupt */ 176 static inline void 177 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, 178 uint32_t mask, 179 uint32_t bits) 180 { 181 uint32_t val; 182 183 assert_spin_locked(&dev_priv->irq_lock); 184 WARN_ON(bits & ~mask); 185 186 val = I915_READ(PORT_HOTPLUG_EN); 187 val &= ~mask; 188 val |= bits; 189 I915_WRITE(PORT_HOTPLUG_EN, val); 190 } 191 192 /** 193 * i915_hotplug_interrupt_update - update hotplug interrupt enable 194 * @dev_priv: driver private 195 * @mask: bits to update 196 * @bits: bits to enable 197 * NOTE: the HPD enable bits are modified both inside and outside 198 * of an interrupt context. To avoid that read-modify-write cycles 199 * interfer, these bits are protected by a spinlock. Since this 200 * function is usually not called from a context where the lock is 201 * held already, this function acquires the lock itself. A non-locking 202 * version is also available. 203 */ 204 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 205 uint32_t mask, 206 uint32_t bits) 207 { 208 spin_lock_irq(&dev_priv->irq_lock); 209 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 210 spin_unlock_irq(&dev_priv->irq_lock); 211 } 212 213 /** 214 * ilk_update_display_irq - update DEIMR 215 * @dev_priv: driver private 216 * @interrupt_mask: mask of interrupt bits to update 217 * @enabled_irq_mask: mask of interrupt bits to enable 218 */ 219 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 220 uint32_t interrupt_mask, 221 uint32_t enabled_irq_mask) 222 { 223 uint32_t new_val; 224 225 assert_spin_locked(&dev_priv->irq_lock); 226 227 WARN_ON(enabled_irq_mask & ~interrupt_mask); 228 229 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 230 return; 231 232 new_val = dev_priv->irq_mask; 233 new_val &= ~interrupt_mask; 234 new_val |= (~enabled_irq_mask & interrupt_mask); 235 236 if (new_val != dev_priv->irq_mask) { 237 dev_priv->irq_mask = new_val; 238 I915_WRITE(DEIMR, dev_priv->irq_mask); 239 POSTING_READ(DEIMR); 240 } 241 } 242 243 /** 244 * ilk_update_gt_irq - update GTIMR 245 * @dev_priv: driver private 246 * @interrupt_mask: mask of interrupt bits to update 247 * @enabled_irq_mask: mask of interrupt bits to enable 248 */ 249 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 250 uint32_t interrupt_mask, 251 uint32_t enabled_irq_mask) 252 { 253 assert_spin_locked(&dev_priv->irq_lock); 254 255 WARN_ON(enabled_irq_mask & ~interrupt_mask); 256 257 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 258 return; 259 260 dev_priv->gt_irq_mask &= ~interrupt_mask; 261 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 262 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 263 POSTING_READ(GTIMR); 264 } 265 266 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 267 { 268 ilk_update_gt_irq(dev_priv, mask, mask); 269 } 270 271 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 272 { 273 ilk_update_gt_irq(dev_priv, mask, 0); 274 } 275 276 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 277 { 278 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 279 } 280 281 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 282 { 283 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 284 } 285 286 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 287 { 288 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER; 289 } 290 291 /** 292 * snb_update_pm_irq - update GEN6_PMIMR 293 * @dev_priv: driver private 294 * @interrupt_mask: mask of interrupt bits to update 295 * @enabled_irq_mask: mask of interrupt bits to enable 296 */ 297 static void snb_update_pm_irq(struct drm_i915_private *dev_priv, 298 uint32_t interrupt_mask, 299 uint32_t enabled_irq_mask) 300 { 301 uint32_t new_val; 302 303 WARN_ON(enabled_irq_mask & ~interrupt_mask); 304 305 assert_spin_locked(&dev_priv->irq_lock); 306 307 new_val = dev_priv->pm_irq_mask; 308 new_val &= ~interrupt_mask; 309 new_val |= (~enabled_irq_mask & interrupt_mask); 310 311 if (new_val != dev_priv->pm_irq_mask) { 312 dev_priv->pm_irq_mask = new_val; 313 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask); 314 POSTING_READ(gen6_pm_imr(dev_priv)); 315 } 316 } 317 318 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 319 { 320 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 321 return; 322 323 snb_update_pm_irq(dev_priv, mask, mask); 324 } 325 326 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv, 327 uint32_t mask) 328 { 329 snb_update_pm_irq(dev_priv, mask, 0); 330 } 331 332 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 333 { 334 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 335 return; 336 337 __gen6_disable_pm_irq(dev_priv, mask); 338 } 339 340 void gen6_reset_rps_interrupts(struct drm_device *dev) 341 { 342 struct drm_i915_private *dev_priv = dev->dev_private; 343 i915_reg_t reg = gen6_pm_iir(dev_priv); 344 345 spin_lock_irq(&dev_priv->irq_lock); 346 I915_WRITE(reg, dev_priv->pm_rps_events); 347 I915_WRITE(reg, dev_priv->pm_rps_events); 348 POSTING_READ(reg); 349 dev_priv->rps.pm_iir = 0; 350 spin_unlock_irq(&dev_priv->irq_lock); 351 } 352 353 void gen6_enable_rps_interrupts(struct drm_device *dev) 354 { 355 struct drm_i915_private *dev_priv = dev->dev_private; 356 357 spin_lock_irq(&dev_priv->irq_lock); 358 359 WARN_ON(dev_priv->rps.pm_iir); 360 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); 361 dev_priv->rps.interrupts_enabled = true; 362 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 363 dev_priv->pm_rps_events); 364 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 365 366 spin_unlock_irq(&dev_priv->irq_lock); 367 } 368 369 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 370 { 371 /* 372 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer 373 * if GEN6_PM_UP_EI_EXPIRED is masked. 374 * 375 * TODO: verify if this can be reproduced on VLV,CHV. 376 */ 377 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) 378 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; 379 380 if (INTEL_INFO(dev_priv)->gen >= 8) 381 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; 382 383 return mask; 384 } 385 386 void gen6_disable_rps_interrupts(struct drm_device *dev) 387 { 388 struct drm_i915_private *dev_priv = dev->dev_private; 389 390 spin_lock_irq(&dev_priv->irq_lock); 391 dev_priv->rps.interrupts_enabled = false; 392 spin_unlock_irq(&dev_priv->irq_lock); 393 394 cancel_work_sync(&dev_priv->rps.work); 395 396 spin_lock_irq(&dev_priv->irq_lock); 397 398 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 399 400 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); 401 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & 402 ~dev_priv->pm_rps_events); 403 404 spin_unlock_irq(&dev_priv->irq_lock); 405 406 synchronize_irq(dev->irq); 407 } 408 409 /** 410 * bdw_update_port_irq - update DE port interrupt 411 * @dev_priv: driver private 412 * @interrupt_mask: mask of interrupt bits to update 413 * @enabled_irq_mask: mask of interrupt bits to enable 414 */ 415 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 416 uint32_t interrupt_mask, 417 uint32_t enabled_irq_mask) 418 { 419 uint32_t new_val; 420 uint32_t old_val; 421 422 assert_spin_locked(&dev_priv->irq_lock); 423 424 WARN_ON(enabled_irq_mask & ~interrupt_mask); 425 426 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 427 return; 428 429 old_val = I915_READ(GEN8_DE_PORT_IMR); 430 431 new_val = old_val; 432 new_val &= ~interrupt_mask; 433 new_val |= (~enabled_irq_mask & interrupt_mask); 434 435 if (new_val != old_val) { 436 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 437 POSTING_READ(GEN8_DE_PORT_IMR); 438 } 439 } 440 441 /** 442 * bdw_update_pipe_irq - update DE pipe interrupt 443 * @dev_priv: driver private 444 * @pipe: pipe whose interrupt to update 445 * @interrupt_mask: mask of interrupt bits to update 446 * @enabled_irq_mask: mask of interrupt bits to enable 447 */ 448 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 449 enum i915_pipe pipe, 450 uint32_t interrupt_mask, 451 uint32_t enabled_irq_mask) 452 { 453 uint32_t new_val; 454 455 assert_spin_locked(&dev_priv->irq_lock); 456 457 WARN_ON(enabled_irq_mask & ~interrupt_mask); 458 459 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 460 return; 461 462 new_val = dev_priv->de_irq_mask[pipe]; 463 new_val &= ~interrupt_mask; 464 new_val |= (~enabled_irq_mask & interrupt_mask); 465 466 if (new_val != dev_priv->de_irq_mask[pipe]) { 467 dev_priv->de_irq_mask[pipe] = new_val; 468 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 469 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 470 } 471 } 472 473 /** 474 * ibx_display_interrupt_update - update SDEIMR 475 * @dev_priv: driver private 476 * @interrupt_mask: mask of interrupt bits to update 477 * @enabled_irq_mask: mask of interrupt bits to enable 478 */ 479 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 480 uint32_t interrupt_mask, 481 uint32_t enabled_irq_mask) 482 { 483 uint32_t sdeimr = I915_READ(SDEIMR); 484 sdeimr &= ~interrupt_mask; 485 sdeimr |= (~enabled_irq_mask & interrupt_mask); 486 487 WARN_ON(enabled_irq_mask & ~interrupt_mask); 488 489 assert_spin_locked(&dev_priv->irq_lock); 490 491 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 492 return; 493 494 I915_WRITE(SDEIMR, sdeimr); 495 POSTING_READ(SDEIMR); 496 } 497 498 static void 499 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 500 u32 enable_mask, u32 status_mask) 501 { 502 i915_reg_t reg = PIPESTAT(pipe); 503 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 504 505 assert_spin_locked(&dev_priv->irq_lock); 506 WARN_ON(!intel_irqs_enabled(dev_priv)); 507 508 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 509 status_mask & ~PIPESTAT_INT_STATUS_MASK, 510 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 511 pipe_name(pipe), enable_mask, status_mask)) 512 return; 513 514 if ((pipestat & enable_mask) == enable_mask) 515 return; 516 517 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 518 519 /* Enable the interrupt, clear any pending status */ 520 pipestat |= enable_mask | status_mask; 521 I915_WRITE(reg, pipestat); 522 POSTING_READ(reg); 523 } 524 525 static void 526 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 527 u32 enable_mask, u32 status_mask) 528 { 529 i915_reg_t reg = PIPESTAT(pipe); 530 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 531 532 assert_spin_locked(&dev_priv->irq_lock); 533 WARN_ON(!intel_irqs_enabled(dev_priv)); 534 535 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 536 status_mask & ~PIPESTAT_INT_STATUS_MASK, 537 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 538 pipe_name(pipe), enable_mask, status_mask)) 539 return; 540 541 if ((pipestat & enable_mask) == 0) 542 return; 543 544 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 545 546 pipestat &= ~enable_mask; 547 I915_WRITE(reg, pipestat); 548 POSTING_READ(reg); 549 } 550 551 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask) 552 { 553 u32 enable_mask = status_mask << 16; 554 555 /* 556 * On pipe A we don't support the PSR interrupt yet, 557 * on pipe B and C the same bit MBZ. 558 */ 559 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 560 return 0; 561 /* 562 * On pipe B and C we don't support the PSR interrupt yet, on pipe 563 * A the same bit is for perf counters which we don't use either. 564 */ 565 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 566 return 0; 567 568 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 569 SPRITE0_FLIP_DONE_INT_EN_VLV | 570 SPRITE1_FLIP_DONE_INT_EN_VLV); 571 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 572 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 573 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 574 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 575 576 return enable_mask; 577 } 578 579 void 580 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 581 u32 status_mask) 582 { 583 u32 enable_mask; 584 585 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 586 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 587 status_mask); 588 else 589 enable_mask = status_mask << 16; 590 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask); 591 } 592 593 void 594 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe, 595 u32 status_mask) 596 { 597 u32 enable_mask; 598 599 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 600 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 601 status_mask); 602 else 603 enable_mask = status_mask << 16; 604 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask); 605 } 606 607 /** 608 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 609 * @dev: drm device 610 */ 611 static void i915_enable_asle_pipestat(struct drm_device *dev) 612 { 613 struct drm_i915_private *dev_priv = dev->dev_private; 614 615 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 616 return; 617 618 spin_lock_irq(&dev_priv->irq_lock); 619 620 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 621 if (INTEL_INFO(dev)->gen >= 4) 622 i915_enable_pipestat(dev_priv, PIPE_A, 623 PIPE_LEGACY_BLC_EVENT_STATUS); 624 625 spin_unlock_irq(&dev_priv->irq_lock); 626 } 627 628 /* 629 * This timing diagram depicts the video signal in and 630 * around the vertical blanking period. 631 * 632 * Assumptions about the fictitious mode used in this example: 633 * vblank_start >= 3 634 * vsync_start = vblank_start + 1 635 * vsync_end = vblank_start + 2 636 * vtotal = vblank_start + 3 637 * 638 * start of vblank: 639 * latch double buffered registers 640 * increment frame counter (ctg+) 641 * generate start of vblank interrupt (gen4+) 642 * | 643 * | frame start: 644 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 645 * | may be shifted forward 1-3 extra lines via PIPECONF 646 * | | 647 * | | start of vsync: 648 * | | generate vsync interrupt 649 * | | | 650 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 651 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 652 * ----va---> <-----------------vb--------------------> <--------va------------- 653 * | | <----vs-----> | 654 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 655 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 656 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 657 * | | | 658 * last visible pixel first visible pixel 659 * | increment frame counter (gen3/4) 660 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 661 * 662 * x = horizontal active 663 * _ = horizontal blanking 664 * hs = horizontal sync 665 * va = vertical active 666 * vb = vertical blanking 667 * vs = vertical sync 668 * vbs = vblank_start (number) 669 * 670 * Summary: 671 * - most events happen at the start of horizontal sync 672 * - frame start happens at the start of horizontal blank, 1-4 lines 673 * (depending on PIPECONF settings) after the start of vblank 674 * - gen3/4 pixel and frame counter are synchronized with the start 675 * of horizontal active on the first line of vertical active 676 */ 677 678 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 679 { 680 /* Gen2 doesn't have a hardware frame counter */ 681 return 0; 682 } 683 684 /* Called from drm generic code, passed a 'crtc', which 685 * we use as a pipe index 686 */ 687 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 688 { 689 struct drm_i915_private *dev_priv = dev->dev_private; 690 i915_reg_t high_frame, low_frame; 691 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 692 struct intel_crtc *intel_crtc = 693 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 694 const struct drm_display_mode *mode = &intel_crtc->base.hwmode; 695 696 htotal = mode->crtc_htotal; 697 hsync_start = mode->crtc_hsync_start; 698 vbl_start = mode->crtc_vblank_start; 699 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 700 vbl_start = DIV_ROUND_UP(vbl_start, 2); 701 702 /* Convert to pixel count */ 703 vbl_start *= htotal; 704 705 /* Start of vblank event occurs at start of hsync */ 706 vbl_start -= htotal - hsync_start; 707 708 high_frame = PIPEFRAME(pipe); 709 low_frame = PIPEFRAMEPIXEL(pipe); 710 711 /* 712 * High & low register fields aren't synchronized, so make sure 713 * we get a low value that's stable across two reads of the high 714 * register. 715 */ 716 do { 717 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 718 low = I915_READ(low_frame); 719 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; 720 } while (high1 != high2); 721 722 high1 >>= PIPE_FRAME_HIGH_SHIFT; 723 pixel = low & PIPE_PIXEL_MASK; 724 low >>= PIPE_FRAME_LOW_SHIFT; 725 726 /* 727 * The frame counter increments at beginning of active. 728 * Cook up a vblank counter by also checking the pixel 729 * counter against vblank start. 730 */ 731 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff; 732 } 733 734 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 735 { 736 struct drm_i915_private *dev_priv = dev->dev_private; 737 738 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 739 } 740 741 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */ 742 static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 743 { 744 struct drm_device *dev = crtc->base.dev; 745 struct drm_i915_private *dev_priv = dev->dev_private; 746 const struct drm_display_mode *mode = &crtc->base.hwmode; 747 enum i915_pipe pipe = crtc->pipe; 748 int position, vtotal; 749 750 vtotal = mode->crtc_vtotal; 751 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 752 vtotal /= 2; 753 754 if (IS_GEN2(dev)) 755 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 756 else 757 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 758 759 /* 760 * On HSW, the DSL reg (0x70000) appears to return 0 if we 761 * read it just before the start of vblank. So try it again 762 * so we don't accidentally end up spanning a vblank frame 763 * increment, causing the pipe_update_end() code to squak at us. 764 * 765 * The nature of this problem means we can't simply check the ISR 766 * bit and return the vblank start value; nor can we use the scanline 767 * debug register in the transcoder as it appears to have the same 768 * problem. We may need to extend this to include other platforms, 769 * but so far testing only shows the problem on HSW. 770 */ 771 if (HAS_DDI(dev) && !position) { 772 int i, temp; 773 774 for (i = 0; i < 100; i++) { 775 udelay(1); 776 temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & 777 DSL_LINEMASK_GEN3; 778 if (temp != position) { 779 position = temp; 780 break; 781 } 782 } 783 } 784 785 /* 786 * See update_scanline_offset() for the details on the 787 * scanline_offset adjustment. 788 */ 789 return (position + crtc->scanline_offset) % vtotal; 790 } 791 792 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, 793 unsigned int flags, int *vpos, int *hpos, 794 ktime_t *stime, ktime_t *etime, 795 const struct drm_display_mode *mode) 796 { 797 struct drm_i915_private *dev_priv = dev->dev_private; 798 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 799 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 800 int position; 801 int vbl_start, vbl_end, hsync_start, htotal, vtotal; 802 bool in_vbl = true; 803 int ret = 0; 804 unsigned long irqflags; 805 806 if (WARN_ON(!mode->crtc_clock)) { 807 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " 808 "pipe %c\n", pipe_name(pipe)); 809 return 0; 810 } 811 812 htotal = mode->crtc_htotal; 813 hsync_start = mode->crtc_hsync_start; 814 vtotal = mode->crtc_vtotal; 815 vbl_start = mode->crtc_vblank_start; 816 vbl_end = mode->crtc_vblank_end; 817 818 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 819 vbl_start = DIV_ROUND_UP(vbl_start, 2); 820 vbl_end /= 2; 821 vtotal /= 2; 822 } 823 824 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; 825 826 /* 827 * Lock uncore.lock, as we will do multiple timing critical raw 828 * register reads, potentially with preemption disabled, so the 829 * following code must not block on uncore.lock. 830 */ 831 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 832 833 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ 834 835 /* Get optional system timestamp before query. */ 836 if (stime) 837 *stime = ktime_get(); 838 839 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 840 /* No obvious pixelcount register. Only query vertical 841 * scanout position from Display scan line register. 842 */ 843 position = __intel_get_crtc_scanline(intel_crtc); 844 } else { 845 /* Have access to pixelcount since start of frame. 846 * We can split this into vertical and horizontal 847 * scanout position. 848 */ 849 position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; 850 851 /* convert to pixel counts */ 852 vbl_start *= htotal; 853 vbl_end *= htotal; 854 vtotal *= htotal; 855 856 /* 857 * In interlaced modes, the pixel counter counts all pixels, 858 * so one field will have htotal more pixels. In order to avoid 859 * the reported position from jumping backwards when the pixel 860 * counter is beyond the length of the shorter field, just 861 * clamp the position the length of the shorter field. This 862 * matches how the scanline counter based position works since 863 * the scanline counter doesn't count the two half lines. 864 */ 865 if (position >= vtotal) 866 position = vtotal - 1; 867 868 /* 869 * Start of vblank interrupt is triggered at start of hsync, 870 * just prior to the first active line of vblank. However we 871 * consider lines to start at the leading edge of horizontal 872 * active. So, should we get here before we've crossed into 873 * the horizontal active of the first line in vblank, we would 874 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that, 875 * always add htotal-hsync_start to the current pixel position. 876 */ 877 position = (position + htotal - hsync_start) % vtotal; 878 } 879 880 /* Get optional system timestamp after query. */ 881 if (etime) 882 *etime = ktime_get(); 883 884 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ 885 886 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 887 888 in_vbl = position >= vbl_start && position < vbl_end; 889 890 /* 891 * While in vblank, position will be negative 892 * counting up towards 0 at vbl_end. And outside 893 * vblank, position will be positive counting 894 * up since vbl_end. 895 */ 896 if (position >= vbl_start) 897 position -= vbl_end; 898 else 899 position += vtotal - vbl_end; 900 901 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 902 *vpos = position; 903 *hpos = 0; 904 } else { 905 *vpos = position / htotal; 906 *hpos = position - (*vpos * htotal); 907 } 908 909 /* In vblank? */ 910 if (in_vbl) 911 ret |= DRM_SCANOUTPOS_IN_VBLANK; 912 913 return ret; 914 } 915 916 int intel_get_crtc_scanline(struct intel_crtc *crtc) 917 { 918 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 919 unsigned long irqflags; 920 int position; 921 922 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 923 position = __intel_get_crtc_scanline(crtc); 924 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 925 926 return position; 927 } 928 929 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, 930 int *max_error, 931 struct timeval *vblank_time, 932 unsigned flags) 933 { 934 struct drm_crtc *crtc; 935 936 if (pipe >= INTEL_INFO(dev)->num_pipes) { 937 DRM_ERROR("Invalid crtc %u\n", pipe); 938 return -EINVAL; 939 } 940 941 /* Get drm_crtc to timestamp: */ 942 crtc = intel_get_crtc_for_pipe(dev, pipe); 943 if (crtc == NULL) { 944 DRM_ERROR("Invalid crtc %u\n", pipe); 945 return -EINVAL; 946 } 947 948 if (!crtc->hwmode.crtc_clock) { 949 DRM_DEBUG_KMS("crtc %u is disabled\n", pipe); 950 return -EBUSY; 951 } 952 953 /* Helper routine in DRM core does all the work: */ 954 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, 955 vblank_time, flags, 956 &crtc->hwmode); 957 } 958 959 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 960 { 961 struct drm_i915_private *dev_priv = dev->dev_private; 962 u32 busy_up, busy_down, max_avg, min_avg; 963 u8 new_delay; 964 965 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 966 967 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 968 969 new_delay = dev_priv->ips.cur_delay; 970 971 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 972 busy_up = I915_READ(RCPREVBSYTUPAVG); 973 busy_down = I915_READ(RCPREVBSYTDNAVG); 974 max_avg = I915_READ(RCBMAXAVG); 975 min_avg = I915_READ(RCBMINAVG); 976 977 /* Handle RCS change request from hw */ 978 if (busy_up > max_avg) { 979 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 980 new_delay = dev_priv->ips.cur_delay - 1; 981 if (new_delay < dev_priv->ips.max_delay) 982 new_delay = dev_priv->ips.max_delay; 983 } else if (busy_down < min_avg) { 984 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 985 new_delay = dev_priv->ips.cur_delay + 1; 986 if (new_delay > dev_priv->ips.min_delay) 987 new_delay = dev_priv->ips.min_delay; 988 } 989 990 if (ironlake_set_drps(dev, new_delay)) 991 dev_priv->ips.cur_delay = new_delay; 992 993 lockmgr(&mchdev_lock, LK_RELEASE); 994 995 return; 996 } 997 998 static void notify_ring(struct intel_engine_cs *engine) 999 { 1000 if (!intel_engine_initialized(engine)) 1001 return; 1002 1003 trace_i915_gem_request_notify(engine); 1004 engine->user_interrupts++; 1005 1006 wake_up_all(&engine->irq_queue); 1007 } 1008 1009 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1010 struct intel_rps_ei *ei) 1011 { 1012 ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); 1013 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1014 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1015 } 1016 1017 static bool vlv_c0_above(struct drm_i915_private *dev_priv, 1018 const struct intel_rps_ei *old, 1019 const struct intel_rps_ei *now, 1020 int threshold) 1021 { 1022 u64 time, c0; 1023 unsigned int mul = 100; 1024 1025 if (old->cz_clock == 0) 1026 return false; 1027 1028 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1029 mul <<= 8; 1030 1031 time = now->cz_clock - old->cz_clock; 1032 time *= threshold * dev_priv->czclk_freq; 1033 1034 /* Workload can be split between render + media, e.g. SwapBuffers 1035 * being blitted in X after being rendered in mesa. To account for 1036 * this we need to combine both engines into our activity counter. 1037 */ 1038 c0 = now->render_c0 - old->render_c0; 1039 c0 += now->media_c0 - old->media_c0; 1040 c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1041 1042 return c0 >= time; 1043 } 1044 1045 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1046 { 1047 vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); 1048 dev_priv->rps.up_ei = dev_priv->rps.down_ei; 1049 } 1050 1051 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1052 { 1053 struct intel_rps_ei now; 1054 u32 events = 0; 1055 1056 if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) 1057 return 0; 1058 1059 vlv_c0_read(dev_priv, &now); 1060 if (now.cz_clock == 0) 1061 return 0; 1062 1063 if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { 1064 if (!vlv_c0_above(dev_priv, 1065 &dev_priv->rps.down_ei, &now, 1066 dev_priv->rps.down_threshold)) 1067 events |= GEN6_PM_RP_DOWN_THRESHOLD; 1068 dev_priv->rps.down_ei = now; 1069 } 1070 1071 if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { 1072 if (vlv_c0_above(dev_priv, 1073 &dev_priv->rps.up_ei, &now, 1074 dev_priv->rps.up_threshold)) 1075 events |= GEN6_PM_RP_UP_THRESHOLD; 1076 dev_priv->rps.up_ei = now; 1077 } 1078 1079 return events; 1080 } 1081 1082 static bool any_waiters(struct drm_i915_private *dev_priv) 1083 { 1084 struct intel_engine_cs *engine; 1085 1086 for_each_engine(engine, dev_priv) 1087 if (engine->irq_refcount) 1088 return true; 1089 1090 return false; 1091 } 1092 1093 static void gen6_pm_rps_work(struct work_struct *work) 1094 { 1095 struct drm_i915_private *dev_priv = 1096 container_of(work, struct drm_i915_private, rps.work); 1097 bool client_boost; 1098 int new_delay, adj, min, max; 1099 u32 pm_iir; 1100 1101 spin_lock_irq(&dev_priv->irq_lock); 1102 /* Speed up work cancelation during disabling rps interrupts. */ 1103 if (!dev_priv->rps.interrupts_enabled) { 1104 spin_unlock_irq(&dev_priv->irq_lock); 1105 return; 1106 } 1107 1108 /* 1109 * The RPS work is synced during runtime suspend, we don't require a 1110 * wakeref. TODO: instead of disabling the asserts make sure that we 1111 * always hold an RPM reference while the work is running. 1112 */ 1113 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); 1114 1115 pm_iir = dev_priv->rps.pm_iir; 1116 dev_priv->rps.pm_iir = 0; 1117 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1118 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1119 client_boost = dev_priv->rps.client_boost; 1120 dev_priv->rps.client_boost = false; 1121 spin_unlock_irq(&dev_priv->irq_lock); 1122 1123 /* Make sure we didn't queue anything we're not going to process. */ 1124 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1125 1126 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1127 goto out; 1128 1129 mutex_lock(&dev_priv->rps.hw_lock); 1130 1131 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1132 1133 adj = dev_priv->rps.last_adj; 1134 new_delay = dev_priv->rps.cur_freq; 1135 min = dev_priv->rps.min_freq_softlimit; 1136 max = dev_priv->rps.max_freq_softlimit; 1137 1138 if (client_boost) { 1139 new_delay = dev_priv->rps.max_freq_softlimit; 1140 adj = 0; 1141 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1142 if (adj > 0) 1143 adj *= 2; 1144 else /* CHV needs even encode values */ 1145 adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; 1146 /* 1147 * For better performance, jump directly 1148 * to RPe if we're below it. 1149 */ 1150 if (new_delay < dev_priv->rps.efficient_freq - adj) { 1151 new_delay = dev_priv->rps.efficient_freq; 1152 adj = 0; 1153 } 1154 } else if (any_waiters(dev_priv)) { 1155 adj = 0; 1156 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1157 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq) 1158 new_delay = dev_priv->rps.efficient_freq; 1159 else 1160 new_delay = dev_priv->rps.min_freq_softlimit; 1161 adj = 0; 1162 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1163 if (adj < 0) 1164 adj *= 2; 1165 else /* CHV needs even encode values */ 1166 adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; 1167 } else { /* unknown event */ 1168 adj = 0; 1169 } 1170 1171 dev_priv->rps.last_adj = adj; 1172 1173 /* sysfs frequency interfaces may have snuck in while servicing the 1174 * interrupt 1175 */ 1176 new_delay += adj; 1177 new_delay = clamp_t(int, new_delay, min, max); 1178 1179 intel_set_rps(dev_priv->dev, new_delay); 1180 1181 mutex_unlock(&dev_priv->rps.hw_lock); 1182 out: 1183 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 1184 } 1185 1186 1187 /** 1188 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1189 * occurred. 1190 * @work: workqueue struct 1191 * 1192 * Doesn't actually do anything except notify userspace. As a consequence of 1193 * this event, userspace should try to remap the bad rows since statistically 1194 * it is likely the same row is more likely to go bad again. 1195 */ 1196 static void ivybridge_parity_work(struct work_struct *work) 1197 { 1198 struct drm_i915_private *dev_priv = 1199 container_of(work, struct drm_i915_private, l3_parity.error_work); 1200 u32 error_status, row, bank, subbank; 1201 char *parity_event[6]; 1202 uint32_t misccpctl; 1203 uint8_t slice = 0; 1204 1205 /* We must turn off DOP level clock gating to access the L3 registers. 1206 * In order to prevent a get/put style interface, acquire struct mutex 1207 * any time we access those registers. 1208 */ 1209 mutex_lock(&dev_priv->dev->struct_mutex); 1210 1211 /* If we've screwed up tracking, just let the interrupt fire again */ 1212 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1213 goto out; 1214 1215 misccpctl = I915_READ(GEN7_MISCCPCTL); 1216 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1217 POSTING_READ(GEN7_MISCCPCTL); 1218 1219 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1220 i915_reg_t reg; 1221 1222 slice--; 1223 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1224 break; 1225 1226 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1227 1228 reg = GEN7_L3CDERRST1(slice); 1229 1230 error_status = I915_READ(reg); 1231 row = GEN7_PARITY_ERROR_ROW(error_status); 1232 bank = GEN7_PARITY_ERROR_BANK(error_status); 1233 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1234 1235 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1236 POSTING_READ(reg); 1237 1238 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1239 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1240 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1241 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1242 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1243 parity_event[5] = NULL; 1244 1245 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1246 KOBJ_CHANGE, parity_event); 1247 1248 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1249 slice, row, bank, subbank); 1250 1251 kfree(parity_event[4]); 1252 kfree(parity_event[3]); 1253 kfree(parity_event[2]); 1254 kfree(parity_event[1]); 1255 } 1256 1257 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1258 1259 out: 1260 WARN_ON(dev_priv->l3_parity.which_slice); 1261 spin_lock_irq(&dev_priv->irq_lock); 1262 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1263 spin_unlock_irq(&dev_priv->irq_lock); 1264 1265 mutex_unlock(&dev_priv->dev->struct_mutex); 1266 } 1267 1268 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1269 u32 iir) 1270 { 1271 if (!HAS_L3_DPF(dev_priv)) 1272 return; 1273 1274 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1275 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1276 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1277 1278 iir &= GT_PARITY_ERROR(dev_priv); 1279 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1280 dev_priv->l3_parity.which_slice |= 1 << 1; 1281 1282 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1283 dev_priv->l3_parity.which_slice |= 1 << 0; 1284 1285 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1286 } 1287 1288 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1289 u32 gt_iir) 1290 { 1291 if (gt_iir & 1292 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1293 notify_ring(&dev_priv->engine[RCS]); 1294 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1295 notify_ring(&dev_priv->engine[VCS]); 1296 } 1297 1298 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1299 u32 gt_iir) 1300 { 1301 1302 if (gt_iir & 1303 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) 1304 notify_ring(&dev_priv->engine[RCS]); 1305 if (gt_iir & GT_BSD_USER_INTERRUPT) 1306 notify_ring(&dev_priv->engine[VCS]); 1307 if (gt_iir & GT_BLT_USER_INTERRUPT) 1308 notify_ring(&dev_priv->engine[BCS]); 1309 1310 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1311 GT_BSD_CS_ERROR_INTERRUPT | 1312 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1313 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1314 1315 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1316 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1317 } 1318 1319 static __always_inline void 1320 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1321 { 1322 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) 1323 notify_ring(engine); 1324 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) 1325 tasklet_schedule(&engine->irq_tasklet); 1326 } 1327 1328 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1329 u32 master_ctl, 1330 u32 gt_iir[4]) 1331 { 1332 irqreturn_t ret = IRQ_NONE; 1333 1334 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1335 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1336 if (gt_iir[0]) { 1337 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1338 ret = IRQ_HANDLED; 1339 } else 1340 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1341 } 1342 1343 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1344 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1345 if (gt_iir[1]) { 1346 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1347 ret = IRQ_HANDLED; 1348 } else 1349 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1350 } 1351 1352 if (master_ctl & GEN8_GT_VECS_IRQ) { 1353 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1354 if (gt_iir[3]) { 1355 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1356 ret = IRQ_HANDLED; 1357 } else 1358 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1359 } 1360 1361 if (master_ctl & GEN8_GT_PM_IRQ) { 1362 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1363 if (gt_iir[2] & dev_priv->pm_rps_events) { 1364 I915_WRITE_FW(GEN8_GT_IIR(2), 1365 gt_iir[2] & dev_priv->pm_rps_events); 1366 ret = IRQ_HANDLED; 1367 } else 1368 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1369 } 1370 1371 return ret; 1372 } 1373 1374 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1375 u32 gt_iir[4]) 1376 { 1377 if (gt_iir[0]) { 1378 gen8_cs_irq_handler(&dev_priv->engine[RCS], 1379 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1380 gen8_cs_irq_handler(&dev_priv->engine[BCS], 1381 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1382 } 1383 1384 if (gt_iir[1]) { 1385 gen8_cs_irq_handler(&dev_priv->engine[VCS], 1386 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1387 gen8_cs_irq_handler(&dev_priv->engine[VCS2], 1388 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1389 } 1390 1391 if (gt_iir[3]) 1392 gen8_cs_irq_handler(&dev_priv->engine[VECS], 1393 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1394 1395 if (gt_iir[2] & dev_priv->pm_rps_events) 1396 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1397 } 1398 1399 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1400 { 1401 switch (port) { 1402 case PORT_A: 1403 return val & PORTA_HOTPLUG_LONG_DETECT; 1404 case PORT_B: 1405 return val & PORTB_HOTPLUG_LONG_DETECT; 1406 case PORT_C: 1407 return val & PORTC_HOTPLUG_LONG_DETECT; 1408 default: 1409 return false; 1410 } 1411 } 1412 1413 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1414 { 1415 switch (port) { 1416 case PORT_E: 1417 return val & PORTE_HOTPLUG_LONG_DETECT; 1418 default: 1419 return false; 1420 } 1421 } 1422 1423 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1424 { 1425 switch (port) { 1426 case PORT_A: 1427 return val & PORTA_HOTPLUG_LONG_DETECT; 1428 case PORT_B: 1429 return val & PORTB_HOTPLUG_LONG_DETECT; 1430 case PORT_C: 1431 return val & PORTC_HOTPLUG_LONG_DETECT; 1432 case PORT_D: 1433 return val & PORTD_HOTPLUG_LONG_DETECT; 1434 default: 1435 return false; 1436 } 1437 } 1438 1439 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1440 { 1441 switch (port) { 1442 case PORT_A: 1443 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1444 default: 1445 return false; 1446 } 1447 } 1448 1449 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1450 { 1451 switch (port) { 1452 case PORT_B: 1453 return val & PORTB_HOTPLUG_LONG_DETECT; 1454 case PORT_C: 1455 return val & PORTC_HOTPLUG_LONG_DETECT; 1456 case PORT_D: 1457 return val & PORTD_HOTPLUG_LONG_DETECT; 1458 default: 1459 return false; 1460 } 1461 } 1462 1463 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1464 { 1465 switch (port) { 1466 case PORT_B: 1467 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1468 case PORT_C: 1469 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1470 case PORT_D: 1471 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1472 default: 1473 return false; 1474 } 1475 } 1476 1477 /* 1478 * Get a bit mask of pins that have triggered, and which ones may be long. 1479 * This can be called multiple times with the same masks to accumulate 1480 * hotplug detection results from several registers. 1481 * 1482 * Note that the caller is expected to zero out the masks initially. 1483 */ 1484 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1485 u32 hotplug_trigger, u32 dig_hotplug_reg, 1486 const u32 hpd[HPD_NUM_PINS], 1487 bool long_pulse_detect(enum port port, u32 val)) 1488 { 1489 enum port port; 1490 int i; 1491 1492 for_each_hpd_pin(i) { 1493 if ((hpd[i] & hotplug_trigger) == 0) 1494 continue; 1495 1496 *pin_mask |= BIT(i); 1497 1498 if (!intel_hpd_pin_to_port(i, &port)) 1499 continue; 1500 1501 if (long_pulse_detect(port, dig_hotplug_reg)) 1502 *long_mask |= BIT(i); 1503 } 1504 1505 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1506 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1507 1508 } 1509 1510 static void gmbus_irq_handler(struct drm_device *dev) 1511 { 1512 struct drm_i915_private *dev_priv = dev->dev_private; 1513 1514 wake_up_all(&dev_priv->gmbus_wait_queue); 1515 } 1516 1517 static void dp_aux_irq_handler(struct drm_device *dev) 1518 { 1519 struct drm_i915_private *dev_priv = dev->dev_private; 1520 1521 wake_up_all(&dev_priv->gmbus_wait_queue); 1522 } 1523 1524 #if defined(CONFIG_DEBUG_FS) 1525 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe, 1526 uint32_t crc0, uint32_t crc1, 1527 uint32_t crc2, uint32_t crc3, 1528 uint32_t crc4) 1529 { 1530 struct drm_i915_private *dev_priv = dev->dev_private; 1531 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1532 struct intel_pipe_crc_entry *entry; 1533 int head, tail; 1534 1535 spin_lock(&pipe_crc->lock); 1536 1537 if (!pipe_crc->entries) { 1538 spin_unlock(&pipe_crc->lock); 1539 DRM_DEBUG_KMS("spurious interrupt\n"); 1540 return; 1541 } 1542 1543 head = pipe_crc->head; 1544 tail = pipe_crc->tail; 1545 1546 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1547 spin_unlock(&pipe_crc->lock); 1548 DRM_ERROR("CRC buffer overflowing\n"); 1549 return; 1550 } 1551 1552 entry = &pipe_crc->entries[head]; 1553 1554 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1555 entry->crc[0] = crc0; 1556 entry->crc[1] = crc1; 1557 entry->crc[2] = crc2; 1558 entry->crc[3] = crc3; 1559 entry->crc[4] = crc4; 1560 1561 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1562 pipe_crc->head = head; 1563 1564 spin_unlock(&pipe_crc->lock); 1565 1566 wake_up_interruptible(&pipe_crc->wq); 1567 } 1568 #else 1569 static inline void 1570 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe, 1571 uint32_t crc0, uint32_t crc1, 1572 uint32_t crc2, uint32_t crc3, 1573 uint32_t crc4) {} 1574 #endif 1575 1576 1577 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1578 { 1579 struct drm_i915_private *dev_priv = dev->dev_private; 1580 1581 display_pipe_crc_irq_handler(dev, pipe, 1582 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1583 0, 0, 0, 0); 1584 } 1585 1586 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1587 { 1588 struct drm_i915_private *dev_priv = dev->dev_private; 1589 1590 display_pipe_crc_irq_handler(dev, pipe, 1591 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1592 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1593 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1594 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1595 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1596 } 1597 1598 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe) 1599 { 1600 struct drm_i915_private *dev_priv = dev->dev_private; 1601 uint32_t res1, res2; 1602 1603 if (INTEL_INFO(dev)->gen >= 3) 1604 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1605 else 1606 res1 = 0; 1607 1608 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1609 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1610 else 1611 res2 = 0; 1612 1613 display_pipe_crc_irq_handler(dev, pipe, 1614 I915_READ(PIPE_CRC_RES_RED(pipe)), 1615 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1616 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1617 res1, res2); 1618 } 1619 1620 /* The RPS events need forcewake, so we add them to a work queue and mask their 1621 * IMR bits until the work is done. Other interrupts can be processed without 1622 * the work queue. */ 1623 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1624 { 1625 if (pm_iir & dev_priv->pm_rps_events) { 1626 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1627 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1628 if (dev_priv->rps.interrupts_enabled) { 1629 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1630 queue_work(dev_priv->wq, &dev_priv->rps.work); 1631 } 1632 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1633 } 1634 1635 if (INTEL_INFO(dev_priv)->gen >= 8) 1636 return; 1637 1638 if (HAS_VEBOX(dev_priv)) { 1639 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1640 notify_ring(&dev_priv->engine[VECS]); 1641 1642 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) 1643 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); 1644 } 1645 } 1646 1647 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe) 1648 { 1649 if (!drm_handle_vblank(dev, pipe)) 1650 return false; 1651 1652 return true; 1653 } 1654 1655 static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, 1656 u32 pipe_stats[I915_MAX_PIPES]) 1657 { 1658 struct drm_i915_private *dev_priv = dev->dev_private; 1659 int pipe; 1660 1661 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1662 1663 if (!dev_priv->display_irqs_enabled) { 1664 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1665 return; 1666 } 1667 1668 for_each_pipe(dev_priv, pipe) { 1669 i915_reg_t reg; 1670 u32 mask, iir_bit = 0; 1671 1672 /* 1673 * PIPESTAT bits get signalled even when the interrupt is 1674 * disabled with the mask bits, and some of the status bits do 1675 * not generate interrupts at all (like the underrun bit). Hence 1676 * we need to be careful that we only handle what we want to 1677 * handle. 1678 */ 1679 1680 /* fifo underruns are filterered in the underrun handler. */ 1681 mask = PIPE_FIFO_UNDERRUN_STATUS; 1682 1683 switch (pipe) { 1684 case PIPE_A: 1685 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1686 break; 1687 case PIPE_B: 1688 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1689 break; 1690 case PIPE_C: 1691 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1692 break; 1693 } 1694 if (iir & iir_bit) 1695 mask |= dev_priv->pipestat_irq_mask[pipe]; 1696 1697 if (!mask) 1698 continue; 1699 1700 reg = PIPESTAT(pipe); 1701 mask |= PIPESTAT_INT_ENABLE_MASK; 1702 pipe_stats[pipe] = I915_READ(reg) & mask; 1703 1704 /* 1705 * Clear the PIPE*STAT regs before the IIR 1706 */ 1707 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | 1708 PIPESTAT_INT_STATUS_MASK)) 1709 I915_WRITE(reg, pipe_stats[pipe]); 1710 } 1711 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1712 } 1713 1714 static void valleyview_pipestat_irq_handler(struct drm_device *dev, 1715 u32 pipe_stats[I915_MAX_PIPES]) 1716 { 1717 struct drm_i915_private *dev_priv = to_i915(dev); 1718 enum i915_pipe pipe; 1719 1720 for_each_pipe(dev_priv, pipe) { 1721 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1722 intel_pipe_handle_vblank(dev, pipe)) 1723 intel_check_page_flip(dev, pipe); 1724 1725 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1726 intel_prepare_page_flip(dev, pipe); 1727 intel_finish_page_flip(dev, pipe); 1728 } 1729 1730 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1731 i9xx_pipe_crc_irq_handler(dev, pipe); 1732 1733 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1734 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1735 } 1736 1737 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1738 gmbus_irq_handler(dev); 1739 } 1740 1741 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1742 { 1743 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1744 1745 if (hotplug_status) 1746 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1747 1748 return hotplug_status; 1749 } 1750 1751 static void i9xx_hpd_irq_handler(struct drm_device *dev, 1752 u32 hotplug_status) 1753 { 1754 u32 pin_mask = 0, long_mask = 0; 1755 1756 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1757 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1758 1759 if (hotplug_trigger) { 1760 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1761 hotplug_trigger, hpd_status_g4x, 1762 i9xx_port_hotplug_long_detect); 1763 1764 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1765 } 1766 1767 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1768 dp_aux_irq_handler(dev); 1769 } else { 1770 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1771 1772 if (hotplug_trigger) { 1773 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1774 hotplug_trigger, hpd_status_i915, 1775 i9xx_port_hotplug_long_detect); 1776 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1777 } 1778 } 1779 } 1780 1781 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1782 { 1783 struct drm_device *dev = arg; 1784 struct drm_i915_private *dev_priv = dev->dev_private; 1785 irqreturn_t ret = IRQ_NONE; 1786 1787 if (!intel_irqs_enabled(dev_priv)) 1788 return IRQ_NONE; 1789 1790 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1791 disable_rpm_wakeref_asserts(dev_priv); 1792 1793 do { 1794 u32 iir, gt_iir, pm_iir; 1795 u32 pipe_stats[I915_MAX_PIPES] = {}; 1796 u32 hotplug_status = 0; 1797 u32 ier = 0; 1798 1799 gt_iir = I915_READ(GTIIR); 1800 pm_iir = I915_READ(GEN6_PMIIR); 1801 iir = I915_READ(VLV_IIR); 1802 1803 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 1804 break; 1805 1806 ret = IRQ_HANDLED; 1807 1808 /* 1809 * Theory on interrupt generation, based on empirical evidence: 1810 * 1811 * x = ((VLV_IIR & VLV_IER) || 1812 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 1813 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 1814 * 1815 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1816 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 1817 * guarantee the CPU interrupt will be raised again even if we 1818 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 1819 * bits this time around. 1820 */ 1821 I915_WRITE(VLV_MASTER_IER, 0); 1822 ier = I915_READ(VLV_IER); 1823 I915_WRITE(VLV_IER, 0); 1824 1825 if (gt_iir) 1826 I915_WRITE(GTIIR, gt_iir); 1827 if (pm_iir) 1828 I915_WRITE(GEN6_PMIIR, pm_iir); 1829 1830 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1831 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1832 1833 /* Call regardless, as some status bits might not be 1834 * signalled in iir */ 1835 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1836 1837 /* 1838 * VLV_IIR is single buffered, and reflects the level 1839 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1840 */ 1841 if (iir) 1842 I915_WRITE(VLV_IIR, iir); 1843 1844 I915_WRITE(VLV_IER, ier); 1845 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 1846 POSTING_READ(VLV_MASTER_IER); 1847 1848 if (gt_iir) 1849 snb_gt_irq_handler(dev_priv, gt_iir); 1850 if (pm_iir) 1851 gen6_rps_irq_handler(dev_priv, pm_iir); 1852 1853 if (hotplug_status) 1854 i9xx_hpd_irq_handler(dev, hotplug_status); 1855 1856 valleyview_pipestat_irq_handler(dev, pipe_stats); 1857 } while (0); 1858 1859 enable_rpm_wakeref_asserts(dev_priv); 1860 1861 return ret; 1862 } 1863 1864 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1865 { 1866 struct drm_device *dev = arg; 1867 struct drm_i915_private *dev_priv = dev->dev_private; 1868 irqreturn_t ret = IRQ_NONE; 1869 1870 if (!intel_irqs_enabled(dev_priv)) 1871 return IRQ_NONE; 1872 1873 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 1874 disable_rpm_wakeref_asserts(dev_priv); 1875 1876 do { 1877 u32 master_ctl, iir; 1878 u32 gt_iir[4] = {}; 1879 u32 pipe_stats[I915_MAX_PIPES] = {}; 1880 u32 hotplug_status = 0; 1881 u32 ier = 0; 1882 1883 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1884 iir = I915_READ(VLV_IIR); 1885 1886 if (master_ctl == 0 && iir == 0) 1887 break; 1888 1889 ret = IRQ_HANDLED; 1890 1891 /* 1892 * Theory on interrupt generation, based on empirical evidence: 1893 * 1894 * x = ((VLV_IIR & VLV_IER) || 1895 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 1896 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 1897 * 1898 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 1899 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 1900 * guarantee the CPU interrupt will be raised again even if we 1901 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 1902 * bits this time around. 1903 */ 1904 I915_WRITE(GEN8_MASTER_IRQ, 0); 1905 ier = I915_READ(VLV_IER); 1906 I915_WRITE(VLV_IER, 0); 1907 1908 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 1909 1910 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1911 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 1912 1913 /* Call regardless, as some status bits might not be 1914 * signalled in iir */ 1915 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1916 1917 /* 1918 * VLV_IIR is single buffered, and reflects the level 1919 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 1920 */ 1921 if (iir) 1922 I915_WRITE(VLV_IIR, iir); 1923 1924 I915_WRITE(VLV_IER, ier); 1925 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 1926 POSTING_READ(GEN8_MASTER_IRQ); 1927 1928 gen8_gt_irq_handler(dev_priv, gt_iir); 1929 1930 if (hotplug_status) 1931 i9xx_hpd_irq_handler(dev, hotplug_status); 1932 1933 valleyview_pipestat_irq_handler(dev, pipe_stats); 1934 } while (0); 1935 1936 enable_rpm_wakeref_asserts(dev_priv); 1937 1938 return ret; 1939 } 1940 1941 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1942 const u32 hpd[HPD_NUM_PINS]) 1943 { 1944 struct drm_i915_private *dev_priv = to_i915(dev); 1945 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1946 1947 /* 1948 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 1949 * unless we touch the hotplug register, even if hotplug_trigger is 1950 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 1951 * errors. 1952 */ 1953 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 1954 if (!hotplug_trigger) { 1955 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 1956 PORTD_HOTPLUG_STATUS_MASK | 1957 PORTC_HOTPLUG_STATUS_MASK | 1958 PORTB_HOTPLUG_STATUS_MASK; 1959 dig_hotplug_reg &= ~mask; 1960 } 1961 1962 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 1963 if (!hotplug_trigger) 1964 return; 1965 1966 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1967 dig_hotplug_reg, hpd, 1968 pch_port_hotplug_long_detect); 1969 1970 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1971 } 1972 1973 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1974 { 1975 struct drm_i915_private *dev_priv = dev->dev_private; 1976 int pipe; 1977 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1978 1979 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1980 1981 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1982 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1983 SDE_AUDIO_POWER_SHIFT); 1984 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1985 port_name(port)); 1986 } 1987 1988 if (pch_iir & SDE_AUX_MASK) 1989 dp_aux_irq_handler(dev); 1990 1991 if (pch_iir & SDE_GMBUS) 1992 gmbus_irq_handler(dev); 1993 1994 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1995 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1996 1997 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1998 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1999 2000 if (pch_iir & SDE_POISON) 2001 DRM_ERROR("PCH poison interrupt\n"); 2002 2003 if (pch_iir & SDE_FDI_MASK) 2004 for_each_pipe(dev_priv, pipe) 2005 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2006 pipe_name(pipe), 2007 I915_READ(FDI_RX_IIR(pipe))); 2008 2009 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2010 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2011 2012 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2013 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2014 2015 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2016 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2017 2018 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2019 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2020 } 2021 2022 static void ivb_err_int_handler(struct drm_device *dev) 2023 { 2024 struct drm_i915_private *dev_priv = dev->dev_private; 2025 u32 err_int = I915_READ(GEN7_ERR_INT); 2026 enum i915_pipe pipe; 2027 2028 if (err_int & ERR_INT_POISON) 2029 DRM_ERROR("Poison interrupt\n"); 2030 2031 for_each_pipe(dev_priv, pipe) { 2032 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2033 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2034 2035 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2036 if (IS_IVYBRIDGE(dev)) 2037 ivb_pipe_crc_irq_handler(dev, pipe); 2038 else 2039 hsw_pipe_crc_irq_handler(dev, pipe); 2040 } 2041 } 2042 2043 I915_WRITE(GEN7_ERR_INT, err_int); 2044 } 2045 2046 static void cpt_serr_int_handler(struct drm_device *dev) 2047 { 2048 struct drm_i915_private *dev_priv = dev->dev_private; 2049 u32 serr_int = I915_READ(SERR_INT); 2050 2051 if (serr_int & SERR_INT_POISON) 2052 DRM_ERROR("PCH poison interrupt\n"); 2053 2054 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2055 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); 2056 2057 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2058 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 2059 2060 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2061 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); 2062 2063 I915_WRITE(SERR_INT, serr_int); 2064 } 2065 2066 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2067 { 2068 struct drm_i915_private *dev_priv = dev->dev_private; 2069 int pipe; 2070 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2071 2072 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2073 2074 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2075 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2076 SDE_AUDIO_POWER_SHIFT_CPT); 2077 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2078 port_name(port)); 2079 } 2080 2081 if (pch_iir & SDE_AUX_MASK_CPT) 2082 dp_aux_irq_handler(dev); 2083 2084 if (pch_iir & SDE_GMBUS_CPT) 2085 gmbus_irq_handler(dev); 2086 2087 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2088 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2089 2090 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2091 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2092 2093 if (pch_iir & SDE_FDI_MASK_CPT) 2094 for_each_pipe(dev_priv, pipe) 2095 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2096 pipe_name(pipe), 2097 I915_READ(FDI_RX_IIR(pipe))); 2098 2099 if (pch_iir & SDE_ERROR_CPT) 2100 cpt_serr_int_handler(dev); 2101 } 2102 2103 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2104 { 2105 struct drm_i915_private *dev_priv = dev->dev_private; 2106 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2107 ~SDE_PORTE_HOTPLUG_SPT; 2108 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2109 u32 pin_mask = 0, long_mask = 0; 2110 2111 if (hotplug_trigger) { 2112 u32 dig_hotplug_reg; 2113 2114 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2115 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2116 2117 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2118 dig_hotplug_reg, hpd_spt, 2119 spt_port_hotplug_long_detect); 2120 } 2121 2122 if (hotplug2_trigger) { 2123 u32 dig_hotplug_reg; 2124 2125 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2126 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2127 2128 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2129 dig_hotplug_reg, hpd_spt, 2130 spt_port_hotplug2_long_detect); 2131 } 2132 2133 if (pin_mask) 2134 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2135 2136 if (pch_iir & SDE_GMBUS_CPT) 2137 gmbus_irq_handler(dev); 2138 } 2139 2140 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2141 const u32 hpd[HPD_NUM_PINS]) 2142 { 2143 struct drm_i915_private *dev_priv = to_i915(dev); 2144 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2145 2146 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2147 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2148 2149 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2150 dig_hotplug_reg, hpd, 2151 ilk_port_hotplug_long_detect); 2152 2153 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2154 } 2155 2156 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2157 { 2158 struct drm_i915_private *dev_priv = dev->dev_private; 2159 enum i915_pipe pipe; 2160 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2161 2162 if (hotplug_trigger) 2163 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2164 2165 if (de_iir & DE_AUX_CHANNEL_A) 2166 dp_aux_irq_handler(dev); 2167 2168 if (de_iir & DE_GSE) 2169 intel_opregion_asle_intr(dev); 2170 2171 if (de_iir & DE_POISON) 2172 DRM_ERROR("Poison interrupt\n"); 2173 2174 for_each_pipe(dev_priv, pipe) { 2175 if (de_iir & DE_PIPE_VBLANK(pipe) && 2176 intel_pipe_handle_vblank(dev, pipe)) 2177 intel_check_page_flip(dev, pipe); 2178 2179 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2180 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2181 2182 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2183 i9xx_pipe_crc_irq_handler(dev, pipe); 2184 2185 /* plane/pipes map 1:1 on ilk+ */ 2186 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2187 intel_prepare_page_flip(dev, pipe); 2188 intel_finish_page_flip_plane(dev, pipe); 2189 } 2190 } 2191 2192 /* check event from PCH */ 2193 if (de_iir & DE_PCH_EVENT) { 2194 u32 pch_iir = I915_READ(SDEIIR); 2195 2196 if (HAS_PCH_CPT(dev)) 2197 cpt_irq_handler(dev, pch_iir); 2198 else 2199 ibx_irq_handler(dev, pch_iir); 2200 2201 /* should clear PCH hotplug event before clear CPU irq */ 2202 I915_WRITE(SDEIIR, pch_iir); 2203 } 2204 2205 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2206 ironlake_rps_change_irq_handler(dev); 2207 } 2208 2209 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2210 { 2211 struct drm_i915_private *dev_priv = dev->dev_private; 2212 enum i915_pipe pipe; 2213 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2214 2215 if (hotplug_trigger) 2216 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2217 2218 if (de_iir & DE_ERR_INT_IVB) 2219 ivb_err_int_handler(dev); 2220 2221 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2222 dp_aux_irq_handler(dev); 2223 2224 if (de_iir & DE_GSE_IVB) 2225 intel_opregion_asle_intr(dev); 2226 2227 for_each_pipe(dev_priv, pipe) { 2228 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2229 intel_pipe_handle_vblank(dev, pipe)) 2230 intel_check_page_flip(dev, pipe); 2231 2232 /* plane/pipes map 1:1 on ilk+ */ 2233 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2234 intel_prepare_page_flip(dev, pipe); 2235 intel_finish_page_flip_plane(dev, pipe); 2236 } 2237 } 2238 2239 /* check event from PCH */ 2240 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2241 u32 pch_iir = I915_READ(SDEIIR); 2242 2243 cpt_irq_handler(dev, pch_iir); 2244 2245 /* clear PCH hotplug event before clear CPU irq */ 2246 I915_WRITE(SDEIIR, pch_iir); 2247 } 2248 } 2249 2250 /* 2251 * To handle irqs with the minimum potential races with fresh interrupts, we: 2252 * 1 - Disable Master Interrupt Control. 2253 * 2 - Find the source(s) of the interrupt. 2254 * 3 - Clear the Interrupt Identity bits (IIR). 2255 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2256 * 5 - Re-enable Master Interrupt Control. 2257 */ 2258 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2259 { 2260 struct drm_device *dev = arg; 2261 struct drm_i915_private *dev_priv = dev->dev_private; 2262 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2263 irqreturn_t ret = IRQ_NONE; 2264 2265 if (!intel_irqs_enabled(dev_priv)) 2266 return IRQ_NONE; 2267 2268 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2269 disable_rpm_wakeref_asserts(dev_priv); 2270 2271 /* disable master interrupt before clearing iir */ 2272 de_ier = I915_READ(DEIER); 2273 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2274 POSTING_READ(DEIER); 2275 2276 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2277 * interrupts will will be stored on its back queue, and then we'll be 2278 * able to process them after we restore SDEIER (as soon as we restore 2279 * it, we'll get an interrupt if SDEIIR still has something to process 2280 * due to its back queue). */ 2281 if (!HAS_PCH_NOP(dev)) { 2282 sde_ier = I915_READ(SDEIER); 2283 I915_WRITE(SDEIER, 0); 2284 POSTING_READ(SDEIER); 2285 } 2286 2287 /* Find, clear, then process each source of interrupt */ 2288 2289 gt_iir = I915_READ(GTIIR); 2290 if (gt_iir) { 2291 I915_WRITE(GTIIR, gt_iir); 2292 ret = IRQ_HANDLED; 2293 if (INTEL_INFO(dev)->gen >= 6) 2294 snb_gt_irq_handler(dev_priv, gt_iir); 2295 else 2296 ilk_gt_irq_handler(dev_priv, gt_iir); 2297 } 2298 2299 de_iir = I915_READ(DEIIR); 2300 if (de_iir) { 2301 I915_WRITE(DEIIR, de_iir); 2302 ret = IRQ_HANDLED; 2303 if (INTEL_INFO(dev)->gen >= 7) 2304 ivb_display_irq_handler(dev, de_iir); 2305 else 2306 ilk_display_irq_handler(dev, de_iir); 2307 } 2308 2309 if (INTEL_INFO(dev)->gen >= 6) { 2310 u32 pm_iir = I915_READ(GEN6_PMIIR); 2311 if (pm_iir) { 2312 I915_WRITE(GEN6_PMIIR, pm_iir); 2313 ret = IRQ_HANDLED; 2314 gen6_rps_irq_handler(dev_priv, pm_iir); 2315 } 2316 } 2317 2318 I915_WRITE(DEIER, de_ier); 2319 POSTING_READ(DEIER); 2320 if (!HAS_PCH_NOP(dev)) { 2321 I915_WRITE(SDEIER, sde_ier); 2322 POSTING_READ(SDEIER); 2323 } 2324 2325 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2326 enable_rpm_wakeref_asserts(dev_priv); 2327 2328 return ret; 2329 } 2330 2331 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2332 const u32 hpd[HPD_NUM_PINS]) 2333 { 2334 struct drm_i915_private *dev_priv = to_i915(dev); 2335 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2336 2337 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2338 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2339 2340 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2341 dig_hotplug_reg, hpd, 2342 bxt_port_hotplug_long_detect); 2343 2344 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2345 } 2346 2347 static irqreturn_t 2348 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2349 { 2350 struct drm_device *dev = dev_priv->dev; 2351 irqreturn_t ret = IRQ_NONE; 2352 u32 iir; 2353 enum i915_pipe pipe; 2354 2355 if (master_ctl & GEN8_DE_MISC_IRQ) { 2356 iir = I915_READ(GEN8_DE_MISC_IIR); 2357 if (iir) { 2358 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2359 ret = IRQ_HANDLED; 2360 if (iir & GEN8_DE_MISC_GSE) 2361 intel_opregion_asle_intr(dev); 2362 else 2363 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2364 } 2365 else 2366 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2367 } 2368 2369 if (master_ctl & GEN8_DE_PORT_IRQ) { 2370 iir = I915_READ(GEN8_DE_PORT_IIR); 2371 if (iir) { 2372 u32 tmp_mask; 2373 bool found = false; 2374 2375 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2376 ret = IRQ_HANDLED; 2377 2378 tmp_mask = GEN8_AUX_CHANNEL_A; 2379 if (INTEL_INFO(dev_priv)->gen >= 9) 2380 tmp_mask |= GEN9_AUX_CHANNEL_B | 2381 GEN9_AUX_CHANNEL_C | 2382 GEN9_AUX_CHANNEL_D; 2383 2384 if (iir & tmp_mask) { 2385 dp_aux_irq_handler(dev); 2386 found = true; 2387 } 2388 2389 if (IS_BROXTON(dev_priv)) { 2390 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2391 if (tmp_mask) { 2392 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); 2393 found = true; 2394 } 2395 } else if (IS_BROADWELL(dev_priv)) { 2396 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2397 if (tmp_mask) { 2398 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); 2399 found = true; 2400 } 2401 } 2402 2403 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { 2404 gmbus_irq_handler(dev); 2405 found = true; 2406 } 2407 2408 if (!found) 2409 DRM_ERROR("Unexpected DE Port interrupt\n"); 2410 } 2411 else 2412 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2413 } 2414 2415 for_each_pipe(dev_priv, pipe) { 2416 u32 flip_done, fault_errors; 2417 2418 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2419 continue; 2420 2421 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2422 if (!iir) { 2423 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2424 continue; 2425 } 2426 2427 ret = IRQ_HANDLED; 2428 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2429 2430 if (iir & GEN8_PIPE_VBLANK && 2431 intel_pipe_handle_vblank(dev, pipe)) 2432 intel_check_page_flip(dev, pipe); 2433 2434 flip_done = iir; 2435 if (INTEL_INFO(dev_priv)->gen >= 9) 2436 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE; 2437 else 2438 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2439 2440 if (flip_done) { 2441 intel_prepare_page_flip(dev, pipe); 2442 intel_finish_page_flip_plane(dev, pipe); 2443 } 2444 2445 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2446 hsw_pipe_crc_irq_handler(dev, pipe); 2447 2448 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2449 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2450 2451 fault_errors = iir; 2452 if (INTEL_INFO(dev_priv)->gen >= 9) 2453 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2454 else 2455 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2456 2457 if (fault_errors) 2458 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x", 2459 pipe_name(pipe), 2460 fault_errors); 2461 } 2462 2463 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2464 master_ctl & GEN8_DE_PCH_IRQ) { 2465 /* 2466 * FIXME(BDW): Assume for now that the new interrupt handling 2467 * scheme also closed the SDE interrupt handling race we've seen 2468 * on older pch-split platforms. But this needs testing. 2469 */ 2470 iir = I915_READ(SDEIIR); 2471 if (iir) { 2472 I915_WRITE(SDEIIR, iir); 2473 ret = IRQ_HANDLED; 2474 2475 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2476 spt_irq_handler(dev, iir); 2477 else 2478 cpt_irq_handler(dev, iir); 2479 } else { 2480 /* 2481 * Like on previous PCH there seems to be something 2482 * fishy going on with forwarding PCH interrupts. 2483 */ 2484 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2485 } 2486 } 2487 2488 return ret; 2489 } 2490 2491 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2492 { 2493 struct drm_device *dev = arg; 2494 struct drm_i915_private *dev_priv = dev->dev_private; 2495 u32 master_ctl; 2496 u32 gt_iir[4] = {}; 2497 irqreturn_t ret; 2498 2499 if (!intel_irqs_enabled(dev_priv)) 2500 return IRQ_NONE; 2501 2502 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2503 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2504 if (!master_ctl) 2505 return IRQ_NONE; 2506 2507 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2508 2509 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2510 disable_rpm_wakeref_asserts(dev_priv); 2511 2512 /* Find, clear, then process each source of interrupt */ 2513 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2514 gen8_gt_irq_handler(dev_priv, gt_iir); 2515 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2516 2517 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2518 POSTING_READ_FW(GEN8_MASTER_IRQ); 2519 2520 enable_rpm_wakeref_asserts(dev_priv); 2521 2522 return ret; 2523 } 2524 2525 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2526 bool reset_completed) 2527 { 2528 struct intel_engine_cs *engine; 2529 2530 /* 2531 * Notify all waiters for GPU completion events that reset state has 2532 * been changed, and that they need to restart their wait after 2533 * checking for potential errors (and bail out to drop locks if there is 2534 * a gpu reset pending so that i915_error_work_func can acquire them). 2535 */ 2536 2537 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2538 for_each_engine(engine, dev_priv) 2539 wake_up_all(&engine->irq_queue); 2540 2541 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2542 wake_up_all(&dev_priv->pending_flip_queue); 2543 2544 /* 2545 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2546 * reset state is cleared. 2547 */ 2548 if (reset_completed) 2549 wake_up_all(&dev_priv->gpu_error.reset_queue); 2550 } 2551 2552 /** 2553 * i915_reset_and_wakeup - do process context error handling work 2554 * @dev: drm device 2555 * 2556 * Fire an error uevent so userspace can see that a hang or error 2557 * was detected. 2558 */ 2559 static void i915_reset_and_wakeup(struct drm_device *dev) 2560 { 2561 struct drm_i915_private *dev_priv = to_i915(dev); 2562 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2563 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2564 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2565 int ret; 2566 2567 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2568 2569 /* 2570 * Note that there's only one work item which does gpu resets, so we 2571 * need not worry about concurrent gpu resets potentially incrementing 2572 * error->reset_counter twice. We only need to take care of another 2573 * racing irq/hangcheck declaring the gpu dead for a second time. A 2574 * quick check for that is good enough: schedule_work ensures the 2575 * correct ordering between hang detection and this work item, and since 2576 * the reset in-progress bit is only ever set by code outside of this 2577 * work we don't need to worry about any other races. 2578 */ 2579 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2580 DRM_DEBUG_DRIVER("resetting chip\n"); 2581 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2582 reset_event); 2583 2584 /* 2585 * In most cases it's guaranteed that we get here with an RPM 2586 * reference held, for example because there is a pending GPU 2587 * request that won't finish until the reset is done. This 2588 * isn't the case at least when we get here by doing a 2589 * simulated reset via debugs, so get an RPM reference. 2590 */ 2591 intel_runtime_pm_get(dev_priv); 2592 2593 intel_prepare_reset(dev); 2594 2595 /* 2596 * All state reset _must_ be completed before we update the 2597 * reset counter, for otherwise waiters might miss the reset 2598 * pending state and not properly drop locks, resulting in 2599 * deadlocks with the reset work. 2600 */ 2601 ret = i915_reset(dev); 2602 2603 intel_finish_reset(dev); 2604 2605 intel_runtime_pm_put(dev_priv); 2606 2607 if (ret == 0) 2608 kobject_uevent_env(&dev->primary->kdev->kobj, 2609 KOBJ_CHANGE, reset_done_event); 2610 2611 /* 2612 * Note: The wake_up also serves as a memory barrier so that 2613 * waiters see the update value of the reset counter atomic_t. 2614 */ 2615 i915_error_wake_up(dev_priv, true); 2616 } 2617 } 2618 2619 static void i915_report_and_clear_eir(struct drm_device *dev) 2620 { 2621 struct drm_i915_private *dev_priv = dev->dev_private; 2622 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2623 u32 eir = I915_READ(EIR); 2624 int pipe, i; 2625 2626 if (!eir) 2627 return; 2628 2629 pr_err("render error detected, EIR: 0x%08x\n", eir); 2630 2631 i915_get_extra_instdone(dev, instdone); 2632 2633 if (IS_G4X(dev)) { 2634 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2635 u32 ipeir = I915_READ(IPEIR_I965); 2636 2637 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2638 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2639 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2640 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2641 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2642 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2643 I915_WRITE(IPEIR_I965, ipeir); 2644 POSTING_READ(IPEIR_I965); 2645 } 2646 if (eir & GM45_ERROR_PAGE_TABLE) { 2647 u32 pgtbl_err = I915_READ(PGTBL_ER); 2648 pr_err("page table error\n"); 2649 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2650 I915_WRITE(PGTBL_ER, pgtbl_err); 2651 POSTING_READ(PGTBL_ER); 2652 } 2653 } 2654 2655 if (!IS_GEN2(dev)) { 2656 if (eir & I915_ERROR_PAGE_TABLE) { 2657 u32 pgtbl_err = I915_READ(PGTBL_ER); 2658 pr_err("page table error\n"); 2659 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2660 I915_WRITE(PGTBL_ER, pgtbl_err); 2661 POSTING_READ(PGTBL_ER); 2662 } 2663 } 2664 2665 if (eir & I915_ERROR_MEMORY_REFRESH) { 2666 pr_err("memory refresh error:\n"); 2667 for_each_pipe(dev_priv, pipe) 2668 pr_err("pipe %c stat: 0x%08x\n", 2669 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2670 /* pipestat has already been acked */ 2671 } 2672 if (eir & I915_ERROR_INSTRUCTION) { 2673 pr_err("instruction error\n"); 2674 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2675 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2676 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2677 if (INTEL_INFO(dev)->gen < 4) { 2678 u32 ipeir = I915_READ(IPEIR); 2679 2680 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2681 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2682 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2683 I915_WRITE(IPEIR, ipeir); 2684 POSTING_READ(IPEIR); 2685 } else { 2686 u32 ipeir = I915_READ(IPEIR_I965); 2687 2688 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2689 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2690 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2691 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2692 I915_WRITE(IPEIR_I965, ipeir); 2693 POSTING_READ(IPEIR_I965); 2694 } 2695 } 2696 2697 I915_WRITE(EIR, eir); 2698 POSTING_READ(EIR); 2699 eir = I915_READ(EIR); 2700 if (eir) { 2701 /* 2702 * some errors might have become stuck, 2703 * mask them. 2704 */ 2705 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2706 I915_WRITE(EMR, I915_READ(EMR) | eir); 2707 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2708 } 2709 } 2710 2711 /** 2712 * i915_handle_error - handle a gpu error 2713 * @dev: drm device 2714 * @engine_mask: mask representing engines that are hung 2715 * Do some basic checking of register state at error time and 2716 * dump it to the syslog. Also call i915_capture_error_state() to make 2717 * sure we get a record and make it available in debugfs. Fire a uevent 2718 * so userspace knows something bad happened (should trigger collection 2719 * of a ring dump etc.). 2720 */ 2721 void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2722 const char *fmt, ...) 2723 { 2724 struct drm_i915_private *dev_priv = dev->dev_private; 2725 #if 0 2726 va_list args; 2727 char error_msg[80]; 2728 2729 va_start(args, fmt); 2730 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2731 va_end(args); 2732 2733 i915_capture_error_state(dev, engine_mask, error_msg); 2734 #endif 2735 i915_report_and_clear_eir(dev); 2736 2737 if (engine_mask) { 2738 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2739 &dev_priv->gpu_error.reset_counter); 2740 2741 /* 2742 * Wakeup waiting processes so that the reset function 2743 * i915_reset_and_wakeup doesn't deadlock trying to grab 2744 * various locks. By bumping the reset counter first, the woken 2745 * processes will see a reset in progress and back off, 2746 * releasing their locks and then wait for the reset completion. 2747 * We must do this for _all_ gpu waiters that might hold locks 2748 * that the reset work needs to acquire. 2749 * 2750 * Note: The wake_up serves as the required memory barrier to 2751 * ensure that the waiters see the updated value of the reset 2752 * counter atomic_t. 2753 */ 2754 i915_error_wake_up(dev_priv, false); 2755 } 2756 2757 i915_reset_and_wakeup(dev); 2758 } 2759 2760 /* Called from drm generic code, passed 'crtc' which 2761 * we use as a pipe index 2762 */ 2763 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2764 { 2765 struct drm_i915_private *dev_priv = dev->dev_private; 2766 unsigned long irqflags; 2767 2768 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2769 if (INTEL_INFO(dev)->gen >= 4) 2770 i915_enable_pipestat(dev_priv, pipe, 2771 PIPE_START_VBLANK_INTERRUPT_STATUS); 2772 else 2773 i915_enable_pipestat(dev_priv, pipe, 2774 PIPE_VBLANK_INTERRUPT_STATUS); 2775 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2776 2777 return 0; 2778 } 2779 2780 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2781 { 2782 struct drm_i915_private *dev_priv = dev->dev_private; 2783 unsigned long irqflags; 2784 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2785 DE_PIPE_VBLANK(pipe); 2786 2787 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2788 ilk_enable_display_irq(dev_priv, bit); 2789 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2790 2791 return 0; 2792 } 2793 2794 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2795 { 2796 struct drm_i915_private *dev_priv = dev->dev_private; 2797 unsigned long irqflags; 2798 2799 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2800 i915_enable_pipestat(dev_priv, pipe, 2801 PIPE_START_VBLANK_INTERRUPT_STATUS); 2802 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2803 2804 return 0; 2805 } 2806 2807 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2808 { 2809 struct drm_i915_private *dev_priv = dev->dev_private; 2810 unsigned long irqflags; 2811 2812 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2813 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2814 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2815 2816 return 0; 2817 } 2818 2819 /* Called from drm generic code, passed 'crtc' which 2820 * we use as a pipe index 2821 */ 2822 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2823 { 2824 struct drm_i915_private *dev_priv = dev->dev_private; 2825 unsigned long irqflags; 2826 2827 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2828 i915_disable_pipestat(dev_priv, pipe, 2829 PIPE_VBLANK_INTERRUPT_STATUS | 2830 PIPE_START_VBLANK_INTERRUPT_STATUS); 2831 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2832 } 2833 2834 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2835 { 2836 struct drm_i915_private *dev_priv = dev->dev_private; 2837 unsigned long irqflags; 2838 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2839 DE_PIPE_VBLANK(pipe); 2840 2841 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2842 ilk_disable_display_irq(dev_priv, bit); 2843 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2844 } 2845 2846 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2847 { 2848 struct drm_i915_private *dev_priv = dev->dev_private; 2849 unsigned long irqflags; 2850 2851 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2852 i915_disable_pipestat(dev_priv, pipe, 2853 PIPE_START_VBLANK_INTERRUPT_STATUS); 2854 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2855 } 2856 2857 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2858 { 2859 struct drm_i915_private *dev_priv = dev->dev_private; 2860 unsigned long irqflags; 2861 2862 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2863 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2864 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2865 } 2866 2867 static bool 2868 ring_idle(struct intel_engine_cs *engine, u32 seqno) 2869 { 2870 return i915_seqno_passed(seqno, 2871 READ_ONCE(engine->last_submitted_seqno)); 2872 } 2873 2874 static bool 2875 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2876 { 2877 if (INTEL_INFO(dev)->gen >= 8) { 2878 return (ipehr >> 23) == 0x1c; 2879 } else { 2880 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2881 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2882 MI_SEMAPHORE_REGISTER); 2883 } 2884 } 2885 2886 static struct intel_engine_cs * 2887 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2888 u64 offset) 2889 { 2890 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2891 struct intel_engine_cs *signaller; 2892 2893 if (INTEL_INFO(dev_priv)->gen >= 8) { 2894 for_each_engine(signaller, dev_priv) { 2895 if (engine == signaller) 2896 continue; 2897 2898 if (offset == signaller->semaphore.signal_ggtt[engine->id]) 2899 return signaller; 2900 } 2901 } else { 2902 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2903 2904 for_each_engine(signaller, dev_priv) { 2905 if(engine == signaller) 2906 continue; 2907 2908 if (sync_bits == signaller->semaphore.mbox.wait[engine->id]) 2909 return signaller; 2910 } 2911 } 2912 2913 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", 2914 engine->id, ipehr, offset); 2915 2916 return NULL; 2917 } 2918 2919 static struct intel_engine_cs * 2920 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2921 { 2922 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2923 u32 cmd, ipehr, head; 2924 u64 offset = 0; 2925 int i, backwards; 2926 2927 /* 2928 * This function does not support execlist mode - any attempt to 2929 * proceed further into this function will result in a kernel panic 2930 * when dereferencing ring->buffer, which is not set up in execlist 2931 * mode. 2932 * 2933 * The correct way of doing it would be to derive the currently 2934 * executing ring buffer from the current context, which is derived 2935 * from the currently running request. Unfortunately, to get the 2936 * current request we would have to grab the struct_mutex before doing 2937 * anything else, which would be ill-advised since some other thread 2938 * might have grabbed it already and managed to hang itself, causing 2939 * the hang checker to deadlock. 2940 * 2941 * Therefore, this function does not support execlist mode in its 2942 * current form. Just return NULL and move on. 2943 */ 2944 if (engine->buffer == NULL) 2945 return NULL; 2946 2947 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2948 if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) 2949 return NULL; 2950 2951 /* 2952 * HEAD is likely pointing to the dword after the actual command, 2953 * so scan backwards until we find the MBOX. But limit it to just 3 2954 * or 4 dwords depending on the semaphore wait command size. 2955 * Note that we don't care about ACTHD here since that might 2956 * point at at batch, and semaphores are always emitted into the 2957 * ringbuffer itself. 2958 */ 2959 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2960 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; 2961 2962 for (i = backwards; i; --i) { 2963 /* 2964 * Be paranoid and presume the hw has gone off into the wild - 2965 * our ring is smaller than what the hardware (and hence 2966 * HEAD_ADDR) allows. Also handles wrap-around. 2967 */ 2968 head &= engine->buffer->size - 1; 2969 2970 /* This here seems to blow up */ 2971 cmd = ioread32(engine->buffer->virtual_start + head); 2972 if (cmd == ipehr) 2973 break; 2974 2975 head -= 4; 2976 } 2977 2978 if (!i) 2979 return NULL; 2980 2981 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; 2982 if (INTEL_INFO(engine->dev)->gen >= 8) { 2983 offset = ioread32(engine->buffer->virtual_start + head + 12); 2984 offset <<= 32; 2985 offset = ioread32(engine->buffer->virtual_start + head + 8); 2986 } 2987 return semaphore_wait_to_signaller_ring(engine, ipehr, offset); 2988 } 2989 2990 static int semaphore_passed(struct intel_engine_cs *engine) 2991 { 2992 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2993 struct intel_engine_cs *signaller; 2994 u32 seqno; 2995 2996 engine->hangcheck.deadlock++; 2997 2998 signaller = semaphore_waits_for(engine, &seqno); 2999 if (signaller == NULL) 3000 return -1; 3001 3002 /* Prevent pathological recursion due to driver bugs */ 3003 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 3004 return -1; 3005 3006 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) 3007 return 1; 3008 3009 /* cursory check for an unkickable deadlock */ 3010 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 3011 semaphore_passed(signaller) < 0) 3012 return -1; 3013 3014 return 0; 3015 } 3016 3017 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 3018 { 3019 struct intel_engine_cs *engine; 3020 3021 for_each_engine(engine, dev_priv) 3022 engine->hangcheck.deadlock = 0; 3023 } 3024 3025 static bool subunits_stuck(struct intel_engine_cs *engine) 3026 { 3027 u32 instdone[I915_NUM_INSTDONE_REG]; 3028 bool stuck; 3029 int i; 3030 3031 if (engine->id != RCS) 3032 return true; 3033 3034 i915_get_extra_instdone(engine->dev, instdone); 3035 3036 /* There might be unstable subunit states even when 3037 * actual head is not moving. Filter out the unstable ones by 3038 * accumulating the undone -> done transitions and only 3039 * consider those as progress. 3040 */ 3041 stuck = true; 3042 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) { 3043 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i]; 3044 3045 if (tmp != engine->hangcheck.instdone[i]) 3046 stuck = false; 3047 3048 engine->hangcheck.instdone[i] |= tmp; 3049 } 3050 3051 return stuck; 3052 } 3053 3054 static enum intel_ring_hangcheck_action 3055 head_stuck(struct intel_engine_cs *engine, u64 acthd) 3056 { 3057 if (acthd != engine->hangcheck.acthd) { 3058 3059 /* Clear subunit states on head movement */ 3060 memset(engine->hangcheck.instdone, 0, 3061 sizeof(engine->hangcheck.instdone)); 3062 3063 return HANGCHECK_ACTIVE; 3064 } 3065 3066 if (!subunits_stuck(engine)) 3067 return HANGCHECK_ACTIVE; 3068 3069 return HANGCHECK_HUNG; 3070 } 3071 3072 static enum intel_ring_hangcheck_action 3073 ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3074 { 3075 struct drm_device *dev = engine->dev; 3076 struct drm_i915_private *dev_priv = dev->dev_private; 3077 enum intel_ring_hangcheck_action ha; 3078 u32 tmp; 3079 3080 ha = head_stuck(engine, acthd); 3081 if (ha != HANGCHECK_HUNG) 3082 return ha; 3083 3084 if (IS_GEN2(dev)) 3085 return HANGCHECK_HUNG; 3086 3087 /* Is the chip hanging on a WAIT_FOR_EVENT? 3088 * If so we can simply poke the RB_WAIT bit 3089 * and break the hang. This should work on 3090 * all but the second generation chipsets. 3091 */ 3092 tmp = I915_READ_CTL(engine); 3093 if (tmp & RING_WAIT) { 3094 i915_handle_error(dev, 0, 3095 "Kicking stuck wait on %s", 3096 engine->name); 3097 I915_WRITE_CTL(engine, tmp); 3098 return HANGCHECK_KICK; 3099 } 3100 3101 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3102 switch (semaphore_passed(engine)) { 3103 default: 3104 return HANGCHECK_HUNG; 3105 case 1: 3106 i915_handle_error(dev, 0, 3107 "Kicking stuck semaphore on %s", 3108 engine->name); 3109 I915_WRITE_CTL(engine, tmp); 3110 return HANGCHECK_KICK; 3111 case 0: 3112 return HANGCHECK_WAIT; 3113 } 3114 } 3115 3116 return HANGCHECK_HUNG; 3117 } 3118 3119 static unsigned kick_waiters(struct intel_engine_cs *engine) 3120 { 3121 struct drm_i915_private *i915 = to_i915(engine->dev); 3122 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3123 3124 if (engine->hangcheck.user_interrupts == user_interrupts && 3125 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3126 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) 3127 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3128 engine->name); 3129 else 3130 DRM_INFO("Fake missed irq on %s\n", 3131 engine->name); 3132 wake_up_all(&engine->irq_queue); 3133 } 3134 3135 return user_interrupts; 3136 } 3137 /* 3138 * This is called when the chip hasn't reported back with completed 3139 * batchbuffers in a long time. We keep track per ring seqno progress and 3140 * if there are no progress, hangcheck score for that ring is increased. 3141 * Further, acthd is inspected to see if the ring is stuck. On stuck case 3142 * we kick the ring. If we see no progress on three subsequent calls 3143 * we assume chip is wedged and try to fix it by resetting the chip. 3144 */ 3145 static void i915_hangcheck_elapsed(struct work_struct *work) 3146 { 3147 struct drm_i915_private *dev_priv = 3148 container_of(work, typeof(*dev_priv), 3149 gpu_error.hangcheck_work.work); 3150 struct drm_device *dev = dev_priv->dev; 3151 struct intel_engine_cs *engine; 3152 enum intel_engine_id id; 3153 int busy_count = 0, rings_hung = 0; 3154 bool stuck[I915_NUM_ENGINES] = { 0 }; 3155 #define BUSY 1 3156 #define KICK 5 3157 #define HUNG 20 3158 #define ACTIVE_DECAY 15 3159 3160 if (!i915.enable_hangcheck) 3161 return; 3162 3163 /* 3164 * The hangcheck work is synced during runtime suspend, we don't 3165 * require a wakeref. TODO: instead of disabling the asserts make 3166 * sure that we hold a reference when this work is running. 3167 */ 3168 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3169 3170 /* As enabling the GPU requires fairly extensive mmio access, 3171 * periodically arm the mmio checker to see if we are triggering 3172 * any invalid access. 3173 */ 3174 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3175 3176 for_each_engine_id(engine, dev_priv, id) { 3177 u64 acthd; 3178 u32 seqno; 3179 unsigned user_interrupts; 3180 bool busy = true; 3181 3182 semaphore_clear_deadlocks(dev_priv); 3183 3184 /* We don't strictly need an irq-barrier here, as we are not 3185 * serving an interrupt request, be paranoid in case the 3186 * barrier has side-effects (such as preventing a broken 3187 * cacheline snoop) and so be sure that we can see the seqno 3188 * advance. If the seqno should stick, due to a stale 3189 * cacheline, we would erroneously declare the GPU hung. 3190 */ 3191 if (engine->irq_seqno_barrier) 3192 engine->irq_seqno_barrier(engine); 3193 3194 acthd = intel_ring_get_active_head(engine); 3195 seqno = engine->get_seqno(engine); 3196 3197 /* Reset stuck interrupts between batch advances */ 3198 user_interrupts = 0; 3199 3200 if (engine->hangcheck.seqno == seqno) { 3201 if (ring_idle(engine, seqno)) { 3202 engine->hangcheck.action = HANGCHECK_IDLE; 3203 if (waitqueue_active(&engine->irq_queue)) { 3204 /* Safeguard against driver failure */ 3205 user_interrupts = kick_waiters(engine); 3206 engine->hangcheck.score += BUSY; 3207 } else 3208 busy = false; 3209 } else { 3210 /* We always increment the hangcheck score 3211 * if the ring is busy and still processing 3212 * the same request, so that no single request 3213 * can run indefinitely (such as a chain of 3214 * batches). The only time we do not increment 3215 * the hangcheck score on this ring, if this 3216 * ring is in a legitimate wait for another 3217 * ring. In that case the waiting ring is a 3218 * victim and we want to be sure we catch the 3219 * right culprit. Then every time we do kick 3220 * the ring, add a small increment to the 3221 * score so that we can catch a batch that is 3222 * being repeatedly kicked and so responsible 3223 * for stalling the machine. 3224 */ 3225 engine->hangcheck.action = ring_stuck(engine, 3226 acthd); 3227 3228 switch (engine->hangcheck.action) { 3229 case HANGCHECK_IDLE: 3230 case HANGCHECK_WAIT: 3231 break; 3232 case HANGCHECK_ACTIVE: 3233 engine->hangcheck.score += BUSY; 3234 break; 3235 case HANGCHECK_KICK: 3236 engine->hangcheck.score += KICK; 3237 break; 3238 case HANGCHECK_HUNG: 3239 engine->hangcheck.score += HUNG; 3240 stuck[id] = true; 3241 break; 3242 } 3243 } 3244 } else { 3245 engine->hangcheck.action = HANGCHECK_ACTIVE; 3246 3247 /* Gradually reduce the count so that we catch DoS 3248 * attempts across multiple batches. 3249 */ 3250 if (engine->hangcheck.score > 0) 3251 engine->hangcheck.score -= ACTIVE_DECAY; 3252 if (engine->hangcheck.score < 0) 3253 engine->hangcheck.score = 0; 3254 3255 /* Clear head and subunit states on seqno movement */ 3256 acthd = 0; 3257 3258 memset(engine->hangcheck.instdone, 0, 3259 sizeof(engine->hangcheck.instdone)); 3260 } 3261 3262 engine->hangcheck.seqno = seqno; 3263 engine->hangcheck.acthd = acthd; 3264 engine->hangcheck.user_interrupts = user_interrupts; 3265 busy_count += busy; 3266 } 3267 3268 for_each_engine_id(engine, dev_priv, id) { 3269 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3270 DRM_INFO("%s on %s\n", 3271 stuck[id] ? "stuck" : "no progress", 3272 engine->name); 3273 rings_hung |= intel_engine_flag(engine); 3274 } 3275 } 3276 3277 if (rings_hung) { 3278 i915_handle_error(dev, rings_hung, "Engine(s) hung"); 3279 goto out; 3280 } 3281 3282 if (busy_count) 3283 /* Reset timer case chip hangs without another request 3284 * being added */ 3285 i915_queue_hangcheck(dev); 3286 3287 out: 3288 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv); 3289 } 3290 3291 void i915_queue_hangcheck(struct drm_device *dev) 3292 { 3293 struct i915_gpu_error *e = &to_i915(dev)->gpu_error; 3294 3295 if (!i915.enable_hangcheck) 3296 return; 3297 3298 /* Don't continually defer the hangcheck so that it is always run at 3299 * least once after work has been scheduled on any ring. Otherwise, 3300 * we will ignore a hung ring if a second ring is kept busy. 3301 */ 3302 3303 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work, 3304 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES)); 3305 } 3306 3307 static void ibx_irq_reset(struct drm_device *dev) 3308 { 3309 struct drm_i915_private *dev_priv = dev->dev_private; 3310 3311 if (HAS_PCH_NOP(dev)) 3312 return; 3313 3314 GEN5_IRQ_RESET(SDE); 3315 3316 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3317 I915_WRITE(SERR_INT, 0xffffffff); 3318 } 3319 3320 /* 3321 * SDEIER is also touched by the interrupt handler to work around missed PCH 3322 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3323 * instead we unconditionally enable all PCH interrupt sources here, but then 3324 * only unmask them as needed with SDEIMR. 3325 * 3326 * This function needs to be called before interrupts are enabled. 3327 */ 3328 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3329 { 3330 struct drm_i915_private *dev_priv = dev->dev_private; 3331 3332 if (HAS_PCH_NOP(dev)) 3333 return; 3334 3335 WARN_ON(I915_READ(SDEIER) != 0); 3336 I915_WRITE(SDEIER, 0xffffffff); 3337 POSTING_READ(SDEIER); 3338 } 3339 3340 static void gen5_gt_irq_reset(struct drm_device *dev) 3341 { 3342 struct drm_i915_private *dev_priv = dev->dev_private; 3343 3344 GEN5_IRQ_RESET(GT); 3345 if (INTEL_INFO(dev)->gen >= 6) 3346 GEN5_IRQ_RESET(GEN6_PM); 3347 } 3348 3349 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3350 { 3351 enum i915_pipe pipe; 3352 3353 if (IS_CHERRYVIEW(dev_priv)) 3354 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3355 else 3356 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3357 3358 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3359 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3360 3361 for_each_pipe(dev_priv, pipe) { 3362 I915_WRITE(PIPESTAT(pipe), 3363 PIPE_FIFO_UNDERRUN_STATUS | 3364 PIPESTAT_INT_STATUS_MASK); 3365 dev_priv->pipestat_irq_mask[pipe] = 0; 3366 } 3367 3368 GEN5_IRQ_RESET(VLV_); 3369 dev_priv->irq_mask = ~0; 3370 } 3371 3372 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3373 { 3374 u32 pipestat_mask; 3375 u32 enable_mask; 3376 enum i915_pipe pipe; 3377 3378 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3379 PIPE_CRC_DONE_INTERRUPT_STATUS; 3380 3381 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3382 for_each_pipe(dev_priv, pipe) 3383 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3384 3385 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3386 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3387 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3388 if (IS_CHERRYVIEW(dev_priv)) 3389 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3390 3391 WARN_ON(dev_priv->irq_mask != ~0); 3392 3393 dev_priv->irq_mask = ~enable_mask; 3394 3395 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3396 } 3397 3398 /* drm_dma.h hooks 3399 */ 3400 static void ironlake_irq_reset(struct drm_device *dev) 3401 { 3402 struct drm_i915_private *dev_priv = dev->dev_private; 3403 3404 I915_WRITE(HWSTAM, 0xffffffff); 3405 3406 GEN5_IRQ_RESET(DE); 3407 if (IS_GEN7(dev)) 3408 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3409 3410 gen5_gt_irq_reset(dev); 3411 3412 ibx_irq_reset(dev); 3413 } 3414 3415 static void valleyview_irq_preinstall(struct drm_device *dev) 3416 { 3417 struct drm_i915_private *dev_priv = dev->dev_private; 3418 3419 I915_WRITE(VLV_MASTER_IER, 0); 3420 POSTING_READ(VLV_MASTER_IER); 3421 3422 gen5_gt_irq_reset(dev); 3423 3424 spin_lock_irq(&dev_priv->irq_lock); 3425 if (dev_priv->display_irqs_enabled) 3426 vlv_display_irq_reset(dev_priv); 3427 spin_unlock_irq(&dev_priv->irq_lock); 3428 } 3429 3430 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3431 { 3432 GEN8_IRQ_RESET_NDX(GT, 0); 3433 GEN8_IRQ_RESET_NDX(GT, 1); 3434 GEN8_IRQ_RESET_NDX(GT, 2); 3435 GEN8_IRQ_RESET_NDX(GT, 3); 3436 } 3437 3438 static void gen8_irq_reset(struct drm_device *dev) 3439 { 3440 struct drm_i915_private *dev_priv = dev->dev_private; 3441 int pipe; 3442 3443 I915_WRITE(GEN8_MASTER_IRQ, 0); 3444 POSTING_READ(GEN8_MASTER_IRQ); 3445 3446 gen8_gt_irq_reset(dev_priv); 3447 3448 for_each_pipe(dev_priv, pipe) 3449 if (intel_display_power_is_enabled(dev_priv, 3450 POWER_DOMAIN_PIPE(pipe))) 3451 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3452 3453 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3454 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3455 GEN5_IRQ_RESET(GEN8_PCU_); 3456 3457 if (HAS_PCH_SPLIT(dev)) 3458 ibx_irq_reset(dev); 3459 } 3460 3461 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, 3462 unsigned int pipe_mask) 3463 { 3464 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3465 enum i915_pipe pipe; 3466 3467 spin_lock_irq(&dev_priv->irq_lock); 3468 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3469 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3470 dev_priv->de_irq_mask[pipe], 3471 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3472 spin_unlock_irq(&dev_priv->irq_lock); 3473 } 3474 3475 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3476 unsigned int pipe_mask) 3477 { 3478 enum i915_pipe pipe; 3479 3480 spin_lock_irq(&dev_priv->irq_lock); 3481 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3482 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3483 spin_unlock_irq(&dev_priv->irq_lock); 3484 3485 /* make sure we're done processing display irqs */ 3486 synchronize_irq(dev_priv->dev->irq); 3487 } 3488 3489 static void cherryview_irq_preinstall(struct drm_device *dev) 3490 { 3491 struct drm_i915_private *dev_priv = dev->dev_private; 3492 3493 I915_WRITE(GEN8_MASTER_IRQ, 0); 3494 POSTING_READ(GEN8_MASTER_IRQ); 3495 3496 gen8_gt_irq_reset(dev_priv); 3497 3498 GEN5_IRQ_RESET(GEN8_PCU_); 3499 3500 spin_lock_irq(&dev_priv->irq_lock); 3501 if (dev_priv->display_irqs_enabled) 3502 vlv_display_irq_reset(dev_priv); 3503 spin_unlock_irq(&dev_priv->irq_lock); 3504 } 3505 3506 static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3507 const u32 hpd[HPD_NUM_PINS]) 3508 { 3509 struct drm_i915_private *dev_priv = to_i915(dev); 3510 struct intel_encoder *encoder; 3511 u32 enabled_irqs = 0; 3512 3513 for_each_intel_encoder(dev, encoder) 3514 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3515 enabled_irqs |= hpd[encoder->hpd_pin]; 3516 3517 return enabled_irqs; 3518 } 3519 3520 static void ibx_hpd_irq_setup(struct drm_device *dev) 3521 { 3522 struct drm_i915_private *dev_priv = dev->dev_private; 3523 u32 hotplug_irqs, hotplug, enabled_irqs; 3524 3525 if (HAS_PCH_IBX(dev)) { 3526 hotplug_irqs = SDE_HOTPLUG_MASK; 3527 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3528 } else { 3529 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3530 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3531 } 3532 3533 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3534 3535 /* 3536 * Enable digital hotplug on the PCH, and configure the DP short pulse 3537 * duration to 2ms (which is the minimum in the Display Port spec). 3538 * The pulse duration bits are reserved on LPT+. 3539 */ 3540 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3541 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3542 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3543 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3544 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3545 /* 3546 * When CPU and PCH are on the same package, port A 3547 * HPD must be enabled in both north and south. 3548 */ 3549 if (HAS_PCH_LPT_LP(dev)) 3550 hotplug |= PORTA_HOTPLUG_ENABLE; 3551 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3552 } 3553 3554 static void spt_hpd_irq_setup(struct drm_device *dev) 3555 { 3556 struct drm_i915_private *dev_priv = dev->dev_private; 3557 u32 hotplug_irqs, hotplug, enabled_irqs; 3558 3559 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3560 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3561 3562 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3563 3564 /* Enable digital hotplug on the PCH */ 3565 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3566 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | 3567 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE; 3568 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3569 3570 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3571 hotplug |= PORTE_HOTPLUG_ENABLE; 3572 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3573 } 3574 3575 static void ilk_hpd_irq_setup(struct drm_device *dev) 3576 { 3577 struct drm_i915_private *dev_priv = dev->dev_private; 3578 u32 hotplug_irqs, hotplug, enabled_irqs; 3579 3580 if (INTEL_INFO(dev)->gen >= 8) { 3581 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3582 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3583 3584 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3585 } else if (INTEL_INFO(dev)->gen >= 7) { 3586 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3587 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3588 3589 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3590 } else { 3591 hotplug_irqs = DE_DP_A_HOTPLUG; 3592 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3593 3594 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3595 } 3596 3597 /* 3598 * Enable digital hotplug on the CPU, and configure the DP short pulse 3599 * duration to 2ms (which is the minimum in the Display Port spec) 3600 * The pulse duration bits are reserved on HSW+. 3601 */ 3602 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3603 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3604 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3605 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3606 3607 ibx_hpd_irq_setup(dev); 3608 } 3609 3610 static void bxt_hpd_irq_setup(struct drm_device *dev) 3611 { 3612 struct drm_i915_private *dev_priv = dev->dev_private; 3613 u32 hotplug_irqs, hotplug, enabled_irqs; 3614 3615 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3616 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3617 3618 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3619 3620 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3621 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | 3622 PORTA_HOTPLUG_ENABLE; 3623 3624 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3625 hotplug, enabled_irqs); 3626 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3627 3628 /* 3629 * For BXT invert bit has to be set based on AOB design 3630 * for HPD detection logic, update it based on VBT fields. 3631 */ 3632 3633 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3634 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3635 hotplug |= BXT_DDIA_HPD_INVERT; 3636 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3637 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3638 hotplug |= BXT_DDIB_HPD_INVERT; 3639 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3640 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3641 hotplug |= BXT_DDIC_HPD_INVERT; 3642 3643 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3644 } 3645 3646 static void ibx_irq_postinstall(struct drm_device *dev) 3647 { 3648 struct drm_i915_private *dev_priv = dev->dev_private; 3649 u32 mask; 3650 3651 if (HAS_PCH_NOP(dev)) 3652 return; 3653 3654 if (HAS_PCH_IBX(dev)) 3655 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3656 else 3657 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3658 3659 gen5_assert_iir_is_zero(dev_priv, SDEIIR); 3660 I915_WRITE(SDEIMR, ~mask); 3661 } 3662 3663 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3664 { 3665 struct drm_i915_private *dev_priv = dev->dev_private; 3666 u32 pm_irqs, gt_irqs; 3667 3668 pm_irqs = gt_irqs = 0; 3669 3670 dev_priv->gt_irq_mask = ~0; 3671 if (HAS_L3_DPF(dev)) { 3672 /* L3 parity interrupt is always unmasked. */ 3673 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3674 gt_irqs |= GT_PARITY_ERROR(dev); 3675 } 3676 3677 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3678 if (IS_GEN5(dev)) { 3679 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3680 ILK_BSD_USER_INTERRUPT; 3681 } else { 3682 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3683 } 3684 3685 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3686 3687 if (INTEL_INFO(dev)->gen >= 6) { 3688 /* 3689 * RPS interrupts will get enabled/disabled on demand when RPS 3690 * itself is enabled/disabled. 3691 */ 3692 if (HAS_VEBOX(dev)) 3693 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3694 3695 dev_priv->pm_irq_mask = 0xffffffff; 3696 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3697 } 3698 } 3699 3700 static int ironlake_irq_postinstall(struct drm_device *dev) 3701 { 3702 struct drm_i915_private *dev_priv = dev->dev_private; 3703 u32 display_mask, extra_mask; 3704 3705 if (INTEL_INFO(dev)->gen >= 7) { 3706 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3707 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3708 DE_PLANEB_FLIP_DONE_IVB | 3709 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3710 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3711 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3712 DE_DP_A_HOTPLUG_IVB); 3713 } else { 3714 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3715 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3716 DE_AUX_CHANNEL_A | 3717 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3718 DE_POISON); 3719 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3720 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3721 DE_DP_A_HOTPLUG); 3722 } 3723 3724 dev_priv->irq_mask = ~display_mask; 3725 3726 I915_WRITE(HWSTAM, 0xeffe); 3727 3728 ibx_irq_pre_postinstall(dev); 3729 3730 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3731 3732 gen5_gt_irq_postinstall(dev); 3733 3734 ibx_irq_postinstall(dev); 3735 3736 if (IS_IRONLAKE_M(dev)) { 3737 /* Enable PCU event interrupts 3738 * 3739 * spinlocking not required here for correctness since interrupt 3740 * setup is guaranteed to run in single-threaded context. But we 3741 * need it to make the assert_spin_locked happy. */ 3742 spin_lock_irq(&dev_priv->irq_lock); 3743 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3744 spin_unlock_irq(&dev_priv->irq_lock); 3745 } 3746 3747 return 0; 3748 } 3749 3750 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3751 { 3752 assert_spin_locked(&dev_priv->irq_lock); 3753 3754 if (dev_priv->display_irqs_enabled) 3755 return; 3756 3757 dev_priv->display_irqs_enabled = true; 3758 3759 if (intel_irqs_enabled(dev_priv)) { 3760 vlv_display_irq_reset(dev_priv); 3761 vlv_display_irq_postinstall(dev_priv); 3762 } 3763 } 3764 3765 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3766 { 3767 assert_spin_locked(&dev_priv->irq_lock); 3768 3769 if (!dev_priv->display_irqs_enabled) 3770 return; 3771 3772 dev_priv->display_irqs_enabled = false; 3773 3774 if (intel_irqs_enabled(dev_priv)) 3775 vlv_display_irq_reset(dev_priv); 3776 } 3777 3778 3779 static int valleyview_irq_postinstall(struct drm_device *dev) 3780 { 3781 struct drm_i915_private *dev_priv = dev->dev_private; 3782 3783 gen5_gt_irq_postinstall(dev); 3784 3785 spin_lock_irq(&dev_priv->irq_lock); 3786 if (dev_priv->display_irqs_enabled) 3787 vlv_display_irq_postinstall(dev_priv); 3788 spin_unlock_irq(&dev_priv->irq_lock); 3789 3790 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3791 POSTING_READ(VLV_MASTER_IER); 3792 3793 return 0; 3794 } 3795 3796 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3797 { 3798 /* These are interrupts we'll toggle with the ring mask register */ 3799 uint32_t gt_interrupts[] = { 3800 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3801 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3802 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3803 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3804 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3805 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3806 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3807 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3808 0, 3809 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3810 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3811 }; 3812 3813 if (HAS_L3_DPF(dev_priv)) 3814 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3815 3816 dev_priv->pm_irq_mask = 0xffffffff; 3817 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3818 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3819 /* 3820 * RPS interrupts will get enabled/disabled on demand when RPS itself 3821 * is enabled/disabled. 3822 */ 3823 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); 3824 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3825 } 3826 3827 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3828 { 3829 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3830 uint32_t de_pipe_enables; 3831 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3832 u32 de_port_enables; 3833 enum i915_pipe pipe; 3834 3835 if (INTEL_INFO(dev_priv)->gen >= 9) { 3836 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | 3837 GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3838 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3839 GEN9_AUX_CHANNEL_D; 3840 if (IS_BROXTON(dev_priv)) 3841 de_port_masked |= BXT_DE_PORT_GMBUS; 3842 } else { 3843 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | 3844 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3845 } 3846 3847 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3848 GEN8_PIPE_FIFO_UNDERRUN; 3849 3850 de_port_enables = de_port_masked; 3851 if (IS_BROXTON(dev_priv)) 3852 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3853 else if (IS_BROADWELL(dev_priv)) 3854 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3855 3856 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3857 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3858 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3859 3860 for_each_pipe(dev_priv, pipe) 3861 if (intel_display_power_is_enabled(dev_priv, 3862 POWER_DOMAIN_PIPE(pipe))) 3863 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3864 dev_priv->de_irq_mask[pipe], 3865 de_pipe_enables); 3866 3867 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3868 } 3869 3870 static int gen8_irq_postinstall(struct drm_device *dev) 3871 { 3872 struct drm_i915_private *dev_priv = dev->dev_private; 3873 3874 if (HAS_PCH_SPLIT(dev)) 3875 ibx_irq_pre_postinstall(dev); 3876 3877 gen8_gt_irq_postinstall(dev_priv); 3878 gen8_de_irq_postinstall(dev_priv); 3879 3880 if (HAS_PCH_SPLIT(dev)) 3881 ibx_irq_postinstall(dev); 3882 3883 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3884 POSTING_READ(GEN8_MASTER_IRQ); 3885 3886 return 0; 3887 } 3888 3889 static int cherryview_irq_postinstall(struct drm_device *dev) 3890 { 3891 struct drm_i915_private *dev_priv = dev->dev_private; 3892 3893 gen8_gt_irq_postinstall(dev_priv); 3894 3895 spin_lock_irq(&dev_priv->irq_lock); 3896 if (dev_priv->display_irqs_enabled) 3897 vlv_display_irq_postinstall(dev_priv); 3898 spin_unlock_irq(&dev_priv->irq_lock); 3899 3900 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3901 POSTING_READ(GEN8_MASTER_IRQ); 3902 3903 return 0; 3904 } 3905 3906 static void gen8_irq_uninstall(struct drm_device *dev) 3907 { 3908 struct drm_i915_private *dev_priv = dev->dev_private; 3909 3910 if (!dev_priv) 3911 return; 3912 3913 gen8_irq_reset(dev); 3914 } 3915 3916 static void valleyview_irq_uninstall(struct drm_device *dev) 3917 { 3918 struct drm_i915_private *dev_priv = dev->dev_private; 3919 3920 if (!dev_priv) 3921 return; 3922 3923 I915_WRITE(VLV_MASTER_IER, 0); 3924 POSTING_READ(VLV_MASTER_IER); 3925 3926 gen5_gt_irq_reset(dev); 3927 3928 I915_WRITE(HWSTAM, 0xffffffff); 3929 3930 spin_lock_irq(&dev_priv->irq_lock); 3931 if (dev_priv->display_irqs_enabled) 3932 vlv_display_irq_reset(dev_priv); 3933 spin_unlock_irq(&dev_priv->irq_lock); 3934 } 3935 3936 static void cherryview_irq_uninstall(struct drm_device *dev) 3937 { 3938 struct drm_i915_private *dev_priv = dev->dev_private; 3939 3940 if (!dev_priv) 3941 return; 3942 3943 I915_WRITE(GEN8_MASTER_IRQ, 0); 3944 POSTING_READ(GEN8_MASTER_IRQ); 3945 3946 gen8_gt_irq_reset(dev_priv); 3947 3948 GEN5_IRQ_RESET(GEN8_PCU_); 3949 3950 spin_lock_irq(&dev_priv->irq_lock); 3951 if (dev_priv->display_irqs_enabled) 3952 vlv_display_irq_reset(dev_priv); 3953 spin_unlock_irq(&dev_priv->irq_lock); 3954 } 3955 3956 static void ironlake_irq_uninstall(struct drm_device *dev) 3957 { 3958 struct drm_i915_private *dev_priv = dev->dev_private; 3959 3960 if (!dev_priv) 3961 return; 3962 3963 ironlake_irq_reset(dev); 3964 } 3965 3966 static void i8xx_irq_preinstall(struct drm_device * dev) 3967 { 3968 struct drm_i915_private *dev_priv = dev->dev_private; 3969 int pipe; 3970 3971 for_each_pipe(dev_priv, pipe) 3972 I915_WRITE(PIPESTAT(pipe), 0); 3973 I915_WRITE16(IMR, 0xffff); 3974 I915_WRITE16(IER, 0x0); 3975 POSTING_READ16(IER); 3976 } 3977 3978 static int i8xx_irq_postinstall(struct drm_device *dev) 3979 { 3980 struct drm_i915_private *dev_priv = dev->dev_private; 3981 3982 I915_WRITE16(EMR, 3983 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3984 3985 /* Unmask the interrupts that we always want on. */ 3986 dev_priv->irq_mask = 3987 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3988 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3989 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3990 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3991 I915_WRITE16(IMR, dev_priv->irq_mask); 3992 3993 I915_WRITE16(IER, 3994 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3995 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3996 I915_USER_INTERRUPT); 3997 POSTING_READ16(IER); 3998 3999 /* Interrupt setup is already guaranteed to be single-threaded, this is 4000 * just to make the assert_spin_locked check happy. */ 4001 spin_lock_irq(&dev_priv->irq_lock); 4002 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4003 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4004 spin_unlock_irq(&dev_priv->irq_lock); 4005 4006 return 0; 4007 } 4008 4009 /* 4010 * Returns true when a page flip has completed. 4011 */ 4012 static bool i8xx_handle_vblank(struct drm_device *dev, 4013 int plane, int pipe, u32 iir) 4014 { 4015 struct drm_i915_private *dev_priv = dev->dev_private; 4016 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4017 4018 if (!intel_pipe_handle_vblank(dev, pipe)) 4019 return false; 4020 4021 if ((iir & flip_pending) == 0) 4022 goto check_page_flip; 4023 4024 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4025 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4026 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4027 * the flip is completed (no longer pending). Since this doesn't raise 4028 * an interrupt per se, we watch for the change at vblank. 4029 */ 4030 if (I915_READ16(ISR) & flip_pending) 4031 goto check_page_flip; 4032 4033 intel_prepare_page_flip(dev, plane); 4034 intel_finish_page_flip(dev, pipe); 4035 return true; 4036 4037 check_page_flip: 4038 intel_check_page_flip(dev, pipe); 4039 return false; 4040 } 4041 4042 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 4043 { 4044 struct drm_device *dev = arg; 4045 struct drm_i915_private *dev_priv = dev->dev_private; 4046 u16 iir, new_iir; 4047 u32 pipe_stats[2]; 4048 int pipe; 4049 u16 flip_mask = 4050 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4051 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4052 irqreturn_t ret; 4053 4054 if (!intel_irqs_enabled(dev_priv)) 4055 return IRQ_NONE; 4056 4057 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4058 disable_rpm_wakeref_asserts(dev_priv); 4059 4060 ret = IRQ_NONE; 4061 iir = I915_READ16(IIR); 4062 if (iir == 0) 4063 goto out; 4064 4065 while (iir & ~flip_mask) { 4066 /* Can't rely on pipestat interrupt bit in iir as it might 4067 * have been cleared after the pipestat interrupt was received. 4068 * It doesn't set the bit in iir again, but it still produces 4069 * interrupts (for non-MSI). 4070 */ 4071 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4072 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4073 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4074 4075 for_each_pipe(dev_priv, pipe) { 4076 i915_reg_t reg = PIPESTAT(pipe); 4077 pipe_stats[pipe] = I915_READ(reg); 4078 4079 /* 4080 * Clear the PIPE*STAT regs before the IIR 4081 */ 4082 if (pipe_stats[pipe] & 0x8000ffff) 4083 I915_WRITE(reg, pipe_stats[pipe]); 4084 } 4085 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4086 4087 I915_WRITE16(IIR, iir & ~flip_mask); 4088 new_iir = I915_READ16(IIR); /* Flush posted writes */ 4089 4090 if (iir & I915_USER_INTERRUPT) 4091 notify_ring(&dev_priv->engine[RCS]); 4092 4093 for_each_pipe(dev_priv, pipe) { 4094 int plane = pipe; 4095 if (HAS_FBC(dev)) 4096 plane = !plane; 4097 4098 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4099 i8xx_handle_vblank(dev, plane, pipe, iir)) 4100 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4101 4102 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4103 i9xx_pipe_crc_irq_handler(dev, pipe); 4104 4105 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4106 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4107 pipe); 4108 } 4109 4110 iir = new_iir; 4111 } 4112 ret = IRQ_HANDLED; 4113 4114 out: 4115 enable_rpm_wakeref_asserts(dev_priv); 4116 4117 return ret; 4118 } 4119 4120 static void i8xx_irq_uninstall(struct drm_device * dev) 4121 { 4122 struct drm_i915_private *dev_priv = dev->dev_private; 4123 int pipe; 4124 4125 for_each_pipe(dev_priv, pipe) { 4126 /* Clear enable bits; then clear status bits */ 4127 I915_WRITE(PIPESTAT(pipe), 0); 4128 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4129 } 4130 I915_WRITE16(IMR, 0xffff); 4131 I915_WRITE16(IER, 0x0); 4132 I915_WRITE16(IIR, I915_READ16(IIR)); 4133 } 4134 4135 static void i915_irq_preinstall(struct drm_device * dev) 4136 { 4137 struct drm_i915_private *dev_priv = dev->dev_private; 4138 int pipe; 4139 4140 if (I915_HAS_HOTPLUG(dev)) { 4141 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4142 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4143 } 4144 4145 I915_WRITE16(HWSTAM, 0xeffe); 4146 for_each_pipe(dev_priv, pipe) 4147 I915_WRITE(PIPESTAT(pipe), 0); 4148 I915_WRITE(IMR, 0xffffffff); 4149 I915_WRITE(IER, 0x0); 4150 POSTING_READ(IER); 4151 } 4152 4153 static int i915_irq_postinstall(struct drm_device *dev) 4154 { 4155 struct drm_i915_private *dev_priv = dev->dev_private; 4156 u32 enable_mask; 4157 4158 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4159 4160 /* Unmask the interrupts that we always want on. */ 4161 dev_priv->irq_mask = 4162 ~(I915_ASLE_INTERRUPT | 4163 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4164 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4165 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4166 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4167 4168 enable_mask = 4169 I915_ASLE_INTERRUPT | 4170 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4171 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4172 I915_USER_INTERRUPT; 4173 4174 if (I915_HAS_HOTPLUG(dev)) { 4175 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4176 POSTING_READ(PORT_HOTPLUG_EN); 4177 4178 /* Enable in IER... */ 4179 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 4180 /* and unmask in IMR */ 4181 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 4182 } 4183 4184 I915_WRITE(IMR, dev_priv->irq_mask); 4185 I915_WRITE(IER, enable_mask); 4186 POSTING_READ(IER); 4187 4188 i915_enable_asle_pipestat(dev); 4189 4190 /* Interrupt setup is already guaranteed to be single-threaded, this is 4191 * just to make the assert_spin_locked check happy. */ 4192 spin_lock_irq(&dev_priv->irq_lock); 4193 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4194 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4195 spin_unlock_irq(&dev_priv->irq_lock); 4196 4197 return 0; 4198 } 4199 4200 /* 4201 * Returns true when a page flip has completed. 4202 */ 4203 static bool i915_handle_vblank(struct drm_device *dev, 4204 int plane, int pipe, u32 iir) 4205 { 4206 struct drm_i915_private *dev_priv = dev->dev_private; 4207 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4208 4209 if (!intel_pipe_handle_vblank(dev, pipe)) 4210 return false; 4211 4212 if ((iir & flip_pending) == 0) 4213 goto check_page_flip; 4214 4215 /* We detect FlipDone by looking for the change in PendingFlip from '1' 4216 * to '0' on the following vblank, i.e. IIR has the Pendingflip 4217 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 4218 * the flip is completed (no longer pending). Since this doesn't raise 4219 * an interrupt per se, we watch for the change at vblank. 4220 */ 4221 if (I915_READ(ISR) & flip_pending) 4222 goto check_page_flip; 4223 4224 intel_prepare_page_flip(dev, plane); 4225 intel_finish_page_flip(dev, pipe); 4226 return true; 4227 4228 check_page_flip: 4229 intel_check_page_flip(dev, pipe); 4230 return false; 4231 } 4232 4233 static irqreturn_t i915_irq_handler(int irq, void *arg) 4234 { 4235 struct drm_device *dev = arg; 4236 struct drm_i915_private *dev_priv = dev->dev_private; 4237 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4238 u32 flip_mask = 4239 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4240 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4241 int pipe, ret = IRQ_NONE; 4242 4243 if (!intel_irqs_enabled(dev_priv)) 4244 return IRQ_NONE; 4245 4246 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4247 disable_rpm_wakeref_asserts(dev_priv); 4248 4249 iir = I915_READ(IIR); 4250 do { 4251 bool irq_received = (iir & ~flip_mask) != 0; 4252 bool blc_event = false; 4253 4254 /* Can't rely on pipestat interrupt bit in iir as it might 4255 * have been cleared after the pipestat interrupt was received. 4256 * It doesn't set the bit in iir again, but it still produces 4257 * interrupts (for non-MSI). 4258 */ 4259 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4260 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4261 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4262 4263 for_each_pipe(dev_priv, pipe) { 4264 i915_reg_t reg = PIPESTAT(pipe); 4265 pipe_stats[pipe] = I915_READ(reg); 4266 4267 /* Clear the PIPE*STAT regs before the IIR */ 4268 if (pipe_stats[pipe] & 0x8000ffff) { 4269 I915_WRITE(reg, pipe_stats[pipe]); 4270 irq_received = true; 4271 } 4272 } 4273 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4274 4275 if (!irq_received) 4276 break; 4277 4278 /* Consume port. Then clear IIR or we'll miss events */ 4279 if (I915_HAS_HOTPLUG(dev) && 4280 iir & I915_DISPLAY_PORT_INTERRUPT) { 4281 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4282 if (hotplug_status) 4283 i9xx_hpd_irq_handler(dev, hotplug_status); 4284 } 4285 4286 I915_WRITE(IIR, iir & ~flip_mask); 4287 new_iir = I915_READ(IIR); /* Flush posted writes */ 4288 4289 if (iir & I915_USER_INTERRUPT) 4290 notify_ring(&dev_priv->engine[RCS]); 4291 4292 for_each_pipe(dev_priv, pipe) { 4293 int plane = pipe; 4294 if (HAS_FBC(dev)) 4295 plane = !plane; 4296 4297 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4298 i915_handle_vblank(dev, plane, pipe, iir)) 4299 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4300 4301 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4302 blc_event = true; 4303 4304 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4305 i9xx_pipe_crc_irq_handler(dev, pipe); 4306 4307 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4308 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4309 pipe); 4310 } 4311 4312 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4313 intel_opregion_asle_intr(dev); 4314 4315 /* With MSI, interrupts are only generated when iir 4316 * transitions from zero to nonzero. If another bit got 4317 * set while we were handling the existing iir bits, then 4318 * we would never get another interrupt. 4319 * 4320 * This is fine on non-MSI as well, as if we hit this path 4321 * we avoid exiting the interrupt handler only to generate 4322 * another one. 4323 * 4324 * Note that for MSI this could cause a stray interrupt report 4325 * if an interrupt landed in the time between writing IIR and 4326 * the posting read. This should be rare enough to never 4327 * trigger the 99% of 100,000 interrupts test for disabling 4328 * stray interrupts. 4329 */ 4330 ret = IRQ_HANDLED; 4331 iir = new_iir; 4332 } while (iir & ~flip_mask); 4333 4334 enable_rpm_wakeref_asserts(dev_priv); 4335 4336 return ret; 4337 } 4338 4339 static void i915_irq_uninstall(struct drm_device * dev) 4340 { 4341 struct drm_i915_private *dev_priv = dev->dev_private; 4342 int pipe; 4343 4344 if (I915_HAS_HOTPLUG(dev)) { 4345 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4346 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4347 } 4348 4349 I915_WRITE16(HWSTAM, 0xffff); 4350 for_each_pipe(dev_priv, pipe) { 4351 /* Clear enable bits; then clear status bits */ 4352 I915_WRITE(PIPESTAT(pipe), 0); 4353 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 4354 } 4355 I915_WRITE(IMR, 0xffffffff); 4356 I915_WRITE(IER, 0x0); 4357 4358 I915_WRITE(IIR, I915_READ(IIR)); 4359 } 4360 4361 static void i965_irq_preinstall(struct drm_device * dev) 4362 { 4363 struct drm_i915_private *dev_priv = dev->dev_private; 4364 int pipe; 4365 4366 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4367 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4368 4369 I915_WRITE(HWSTAM, 0xeffe); 4370 for_each_pipe(dev_priv, pipe) 4371 I915_WRITE(PIPESTAT(pipe), 0); 4372 I915_WRITE(IMR, 0xffffffff); 4373 I915_WRITE(IER, 0x0); 4374 POSTING_READ(IER); 4375 } 4376 4377 static int i965_irq_postinstall(struct drm_device *dev) 4378 { 4379 struct drm_i915_private *dev_priv = dev->dev_private; 4380 u32 enable_mask; 4381 u32 error_mask; 4382 4383 /* Unmask the interrupts that we always want on. */ 4384 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4385 I915_DISPLAY_PORT_INTERRUPT | 4386 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 4387 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 4388 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4389 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 4390 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 4391 4392 enable_mask = ~dev_priv->irq_mask; 4393 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4394 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4395 enable_mask |= I915_USER_INTERRUPT; 4396 4397 if (IS_G4X(dev)) 4398 enable_mask |= I915_BSD_USER_INTERRUPT; 4399 4400 /* Interrupt setup is already guaranteed to be single-threaded, this is 4401 * just to make the assert_spin_locked check happy. */ 4402 spin_lock_irq(&dev_priv->irq_lock); 4403 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4404 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4405 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4406 spin_unlock_irq(&dev_priv->irq_lock); 4407 4408 /* 4409 * Enable some error detection, note the instruction error mask 4410 * bit is reserved, so we leave it masked. 4411 */ 4412 if (IS_G4X(dev)) { 4413 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4414 GM45_ERROR_MEM_PRIV | 4415 GM45_ERROR_CP_PRIV | 4416 I915_ERROR_MEMORY_REFRESH); 4417 } else { 4418 error_mask = ~(I915_ERROR_PAGE_TABLE | 4419 I915_ERROR_MEMORY_REFRESH); 4420 } 4421 I915_WRITE(EMR, error_mask); 4422 4423 I915_WRITE(IMR, dev_priv->irq_mask); 4424 I915_WRITE(IER, enable_mask); 4425 POSTING_READ(IER); 4426 4427 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4428 POSTING_READ(PORT_HOTPLUG_EN); 4429 4430 i915_enable_asle_pipestat(dev); 4431 4432 return 0; 4433 } 4434 4435 static void i915_hpd_irq_setup(struct drm_device *dev) 4436 { 4437 struct drm_i915_private *dev_priv = dev->dev_private; 4438 u32 hotplug_en; 4439 4440 assert_spin_locked(&dev_priv->irq_lock); 4441 4442 /* Note HDMI and DP share hotplug bits */ 4443 /* enable bits are the same for all generations */ 4444 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); 4445 /* Programming the CRT detection parameters tends 4446 to generate a spurious hotplug event about three 4447 seconds later. So just do it once. 4448 */ 4449 if (IS_G4X(dev)) 4450 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4451 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4452 4453 /* Ignore TV since it's buggy */ 4454 i915_hotplug_interrupt_update_locked(dev_priv, 4455 HOTPLUG_INT_EN_MASK | 4456 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | 4457 CRT_HOTPLUG_ACTIVATION_PERIOD_64, 4458 hotplug_en); 4459 } 4460 4461 static irqreturn_t i965_irq_handler(int irq, void *arg) 4462 { 4463 struct drm_device *dev = arg; 4464 struct drm_i915_private *dev_priv = dev->dev_private; 4465 u32 iir, new_iir; 4466 u32 pipe_stats[I915_MAX_PIPES]; 4467 int ret = IRQ_NONE, pipe; 4468 u32 flip_mask = 4469 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4470 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 4471 4472 if (!intel_irqs_enabled(dev_priv)) 4473 return IRQ_NONE; 4474 4475 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 4476 disable_rpm_wakeref_asserts(dev_priv); 4477 4478 iir = I915_READ(IIR); 4479 4480 for (;;) { 4481 bool irq_received = (iir & ~flip_mask) != 0; 4482 bool blc_event = false; 4483 4484 /* Can't rely on pipestat interrupt bit in iir as it might 4485 * have been cleared after the pipestat interrupt was received. 4486 * It doesn't set the bit in iir again, but it still produces 4487 * interrupts (for non-MSI). 4488 */ 4489 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 4490 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4491 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 4492 4493 for_each_pipe(dev_priv, pipe) { 4494 i915_reg_t reg = PIPESTAT(pipe); 4495 pipe_stats[pipe] = I915_READ(reg); 4496 4497 /* 4498 * Clear the PIPE*STAT regs before the IIR 4499 */ 4500 if (pipe_stats[pipe] & 0x8000ffff) { 4501 I915_WRITE(reg, pipe_stats[pipe]); 4502 irq_received = true; 4503 } 4504 } 4505 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 4506 4507 if (!irq_received) 4508 break; 4509 4510 ret = IRQ_HANDLED; 4511 4512 /* Consume port. Then clear IIR or we'll miss events */ 4513 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4514 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4515 if (hotplug_status) 4516 i9xx_hpd_irq_handler(dev, hotplug_status); 4517 } 4518 4519 I915_WRITE(IIR, iir & ~flip_mask); 4520 new_iir = I915_READ(IIR); /* Flush posted writes */ 4521 4522 if (iir & I915_USER_INTERRUPT) 4523 notify_ring(&dev_priv->engine[RCS]); 4524 if (iir & I915_BSD_USER_INTERRUPT) 4525 notify_ring(&dev_priv->engine[VCS]); 4526 4527 for_each_pipe(dev_priv, pipe) { 4528 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4529 i915_handle_vblank(dev, pipe, pipe, iir)) 4530 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4531 4532 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4533 blc_event = true; 4534 4535 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4536 i9xx_pipe_crc_irq_handler(dev, pipe); 4537 4538 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4539 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4540 } 4541 4542 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4543 intel_opregion_asle_intr(dev); 4544 4545 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4546 gmbus_irq_handler(dev); 4547 4548 /* With MSI, interrupts are only generated when iir 4549 * transitions from zero to nonzero. If another bit got 4550 * set while we were handling the existing iir bits, then 4551 * we would never get another interrupt. 4552 * 4553 * This is fine on non-MSI as well, as if we hit this path 4554 * we avoid exiting the interrupt handler only to generate 4555 * another one. 4556 * 4557 * Note that for MSI this could cause a stray interrupt report 4558 * if an interrupt landed in the time between writing IIR and 4559 * the posting read. This should be rare enough to never 4560 * trigger the 99% of 100,000 interrupts test for disabling 4561 * stray interrupts. 4562 */ 4563 iir = new_iir; 4564 } 4565 4566 enable_rpm_wakeref_asserts(dev_priv); 4567 4568 return ret; 4569 } 4570 4571 static void i965_irq_uninstall(struct drm_device * dev) 4572 { 4573 struct drm_i915_private *dev_priv = dev->dev_private; 4574 int pipe; 4575 4576 if (!dev_priv) 4577 return; 4578 4579 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4580 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 4581 4582 I915_WRITE(HWSTAM, 0xffffffff); 4583 for_each_pipe(dev_priv, pipe) 4584 I915_WRITE(PIPESTAT(pipe), 0); 4585 I915_WRITE(IMR, 0xffffffff); 4586 I915_WRITE(IER, 0x0); 4587 4588 for_each_pipe(dev_priv, pipe) 4589 I915_WRITE(PIPESTAT(pipe), 4590 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 4591 I915_WRITE(IIR, I915_READ(IIR)); 4592 } 4593 4594 /** 4595 * intel_irq_init - initializes irq support 4596 * @dev_priv: i915 device instance 4597 * 4598 * This function initializes all the irq support including work items, timers 4599 * and all the vtables. It does not setup the interrupt itself though. 4600 */ 4601 void intel_irq_init(struct drm_i915_private *dev_priv) 4602 { 4603 struct drm_device *dev = dev_priv->dev; 4604 4605 intel_hpd_init_work(dev_priv); 4606 4607 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 4608 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4609 4610 /* Let's track the enabled rps events */ 4611 if (IS_VALLEYVIEW(dev_priv)) 4612 /* WaGsvRC0ResidencyMethod:vlv */ 4613 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; 4614 else 4615 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4616 4617 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4618 i915_hangcheck_elapsed); 4619 4620 if (IS_GEN2(dev_priv)) { 4621 dev->max_vblank_count = 0; 4622 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4623 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 4624 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4625 dev->driver->get_vblank_counter = g4x_get_vblank_counter; 4626 } else { 4627 dev->driver->get_vblank_counter = i915_get_vblank_counter; 4628 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 4629 } 4630 4631 /* 4632 * Opt out of the vblank disable timer on everything except gen2. 4633 * Gen2 doesn't have a hardware frame counter and so depends on 4634 * vblank interrupts to produce sane vblank seuquence numbers. 4635 */ 4636 if (!IS_GEN2(dev_priv)) 4637 dev->vblank_disable_immediate = true; 4638 4639 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 4640 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4641 4642 if (IS_CHERRYVIEW(dev_priv)) { 4643 dev->driver->irq_handler = cherryview_irq_handler; 4644 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4645 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4646 dev->driver->irq_uninstall = cherryview_irq_uninstall; 4647 dev->driver->enable_vblank = valleyview_enable_vblank; 4648 dev->driver->disable_vblank = valleyview_disable_vblank; 4649 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4650 } else if (IS_VALLEYVIEW(dev_priv)) { 4651 dev->driver->irq_handler = valleyview_irq_handler; 4652 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4653 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4654 dev->driver->irq_uninstall = valleyview_irq_uninstall; 4655 dev->driver->enable_vblank = valleyview_enable_vblank; 4656 dev->driver->disable_vblank = valleyview_disable_vblank; 4657 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4658 } else if (INTEL_INFO(dev_priv)->gen >= 8) { 4659 dev->driver->irq_handler = gen8_irq_handler; 4660 dev->driver->irq_preinstall = gen8_irq_reset; 4661 dev->driver->irq_postinstall = gen8_irq_postinstall; 4662 dev->driver->irq_uninstall = gen8_irq_uninstall; 4663 dev->driver->enable_vblank = gen8_enable_vblank; 4664 dev->driver->disable_vblank = gen8_disable_vblank; 4665 if (IS_BROXTON(dev)) 4666 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; 4667 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev)) 4668 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; 4669 else 4670 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4671 } else if (HAS_PCH_SPLIT(dev)) { 4672 dev->driver->irq_handler = ironlake_irq_handler; 4673 dev->driver->irq_preinstall = ironlake_irq_reset; 4674 dev->driver->irq_postinstall = ironlake_irq_postinstall; 4675 dev->driver->irq_uninstall = ironlake_irq_uninstall; 4676 dev->driver->enable_vblank = ironlake_enable_vblank; 4677 dev->driver->disable_vblank = ironlake_disable_vblank; 4678 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4679 } else { 4680 if (INTEL_INFO(dev_priv)->gen == 2) { 4681 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4682 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4683 dev->driver->irq_handler = i8xx_irq_handler; 4684 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4685 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4686 dev->driver->irq_preinstall = i915_irq_preinstall; 4687 dev->driver->irq_postinstall = i915_irq_postinstall; 4688 dev->driver->irq_uninstall = i915_irq_uninstall; 4689 dev->driver->irq_handler = i915_irq_handler; 4690 } else { 4691 dev->driver->irq_preinstall = i965_irq_preinstall; 4692 dev->driver->irq_postinstall = i965_irq_postinstall; 4693 dev->driver->irq_uninstall = i965_irq_uninstall; 4694 dev->driver->irq_handler = i965_irq_handler; 4695 } 4696 if (I915_HAS_HOTPLUG(dev_priv)) 4697 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4698 dev->driver->enable_vblank = i915_enable_vblank; 4699 dev->driver->disable_vblank = i915_disable_vblank; 4700 } 4701 } 4702 4703 /** 4704 * intel_irq_install - enables the hardware interrupt 4705 * @dev_priv: i915 device instance 4706 * 4707 * This function enables the hardware interrupt handling, but leaves the hotplug 4708 * handling still disabled. It is called after intel_irq_init(). 4709 * 4710 * In the driver load and resume code we need working interrupts in a few places 4711 * but don't want to deal with the hassle of concurrent probe and hotplug 4712 * workers. Hence the split into this two-stage approach. 4713 */ 4714 int intel_irq_install(struct drm_i915_private *dev_priv) 4715 { 4716 /* 4717 * We enable some interrupt sources in our postinstall hooks, so mark 4718 * interrupts as enabled _before_ actually enabling them to avoid 4719 * special cases in our ordering checks. 4720 */ 4721 dev_priv->pm.irqs_enabled = true; 4722 4723 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4724 } 4725 4726 /** 4727 * intel_irq_uninstall - finilizes all irq handling 4728 * @dev_priv: i915 device instance 4729 * 4730 * This stops interrupt and hotplug handling and unregisters and frees all 4731 * resources acquired in the init functions. 4732 */ 4733 void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4734 { 4735 drm_irq_uninstall(dev_priv->dev); 4736 intel_hpd_cancel_work(dev_priv); 4737 dev_priv->pm.irqs_enabled = false; 4738 } 4739 4740 /** 4741 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling 4742 * @dev_priv: i915 device instance 4743 * 4744 * This function is used to disable interrupts at runtime, both in the runtime 4745 * pm and the system suspend/resume code. 4746 */ 4747 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4748 { 4749 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4750 dev_priv->pm.irqs_enabled = false; 4751 synchronize_irq(dev_priv->dev->irq); 4752 } 4753 4754 /** 4755 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling 4756 * @dev_priv: i915 device instance 4757 * 4758 * This function is used to enable interrupts at runtime, both in the runtime 4759 * pm and the system suspend/resume code. 4760 */ 4761 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4762 { 4763 dev_priv->pm.irqs_enabled = true; 4764 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4765 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4766 } 4767