1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 */ 28 29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 30 31 #include <linux/sysrq.h> 32 #include <linux/slab.h> 33 #include <linux/circ_buf.h> 34 #include <drm/drmP.h> 35 #include <drm/i915_drm.h> 36 #include "i915_drv.h" 37 #include "i915_trace.h" 38 #include "intel_drv.h" 39 40 /** 41 * DOC: interrupt handling 42 * 43 * These functions provide the basic support for enabling and disabling the 44 * interrupt handling support. There's a lot more functionality in i915_irq.c 45 * and related files, but that will be described in separate chapters. 
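 *
 * The hotplug pin tables and the GEN8/GEN3/GEN2 IRQ reset/init helpers that
 * follow capture the per-platform differences in hotplug wiring and the
 * common IMR/IER/IIR programming sequence used at (pre/post)install time.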
46 */ 47 48 static const u32 hpd_ilk[HPD_NUM_PINS] = { 49 [HPD_PORT_A] = DE_DP_A_HOTPLUG, 50 }; 51 52 static const u32 hpd_ivb[HPD_NUM_PINS] = { 53 [HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB, 54 }; 55 56 static const u32 hpd_bdw[HPD_NUM_PINS] = { 57 [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG, 58 }; 59 60 static const u32 hpd_ibx[HPD_NUM_PINS] = { 61 [HPD_CRT] = SDE_CRT_HOTPLUG, 62 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 63 [HPD_PORT_B] = SDE_PORTB_HOTPLUG, 64 [HPD_PORT_C] = SDE_PORTC_HOTPLUG, 65 [HPD_PORT_D] = SDE_PORTD_HOTPLUG 66 }; 67 68 static const u32 hpd_cpt[HPD_NUM_PINS] = { 69 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT, 70 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT, 71 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 72 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 73 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT 74 }; 75 76 static const u32 hpd_spt[HPD_NUM_PINS] = { 77 [HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT, 78 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT, 79 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT, 80 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT, 81 [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT 82 }; 83 84 static const u32 hpd_mask_i915[HPD_NUM_PINS] = { 85 [HPD_CRT] = CRT_HOTPLUG_INT_EN, 86 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN, 87 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN, 88 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN, 89 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN, 90 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN 91 }; 92 93 static const u32 hpd_status_g4x[HPD_NUM_PINS] = { 94 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 95 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X, 96 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X, 97 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 98 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 99 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 100 }; 101 102 static const u32 hpd_status_i915[HPD_NUM_PINS] = { 103 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS, 104 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915, 105 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915, 106 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS, 107 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS, 108 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS 109 }; 110 111 /* BXT hpd list */ 112 static const u32 hpd_bxt[HPD_NUM_PINS] = { 113 [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA, 114 [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB, 115 [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC 116 }; 117 118 /* IIR can theoretically queue up two events. Be paranoid. */ 119 #define GEN8_IRQ_RESET_NDX(type, which) do { \ 120 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 121 POSTING_READ(GEN8_##type##_IMR(which)); \ 122 I915_WRITE(GEN8_##type##_IER(which), 0); \ 123 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 124 POSTING_READ(GEN8_##type##_IIR(which)); \ 125 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 126 POSTING_READ(GEN8_##type##_IIR(which)); \ 127 } while (0) 128 129 #define GEN3_IRQ_RESET(type) do { \ 130 I915_WRITE(type##IMR, 0xffffffff); \ 131 POSTING_READ(type##IMR); \ 132 I915_WRITE(type##IER, 0); \ 133 I915_WRITE(type##IIR, 0xffffffff); \ 134 POSTING_READ(type##IIR); \ 135 I915_WRITE(type##IIR, 0xffffffff); \ 136 POSTING_READ(type##IIR); \ 137 } while (0) 138 139 #define GEN2_IRQ_RESET(type) do { \ 140 I915_WRITE16(type##IMR, 0xffff); \ 141 POSTING_READ16(type##IMR); \ 142 I915_WRITE16(type##IER, 0); \ 143 I915_WRITE16(type##IIR, 0xffff); \ 144 POSTING_READ16(type##IIR); \ 145 I915_WRITE16(type##IIR, 0xffff); \ 146 POSTING_READ16(type##IIR); \ 147 } while (0) 148 149 /* 150 * We should clear IMR at preinstall/uninstall, and just check at postinstall. 
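 * gen3_assert_iir_is_zero()/gen2_assert_iir_is_zero() below perform that
 * postinstall check, via the GEN*_IRQ_INIT macros.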
 */
static void gen3_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

static void gen2_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u16 val = I915_READ16(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
	I915_WRITE16(reg, 0xffff);
	POSTING_READ16(reg);
}

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN3_IRQ_INIT(type, imr_val, ier_val) do { \
	gen3_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

#define GEN2_IRQ_INIT(type, imr_val, ier_val) do { \
	gen2_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE16(type##IER, (ier_val)); \
	I915_WRITE16(type##IMR, (imr_val)); \
	POSTING_READ16(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	lockdep_assert_held(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To keep the read-modify-write cycles from
 * interfering with each other, these bits are protected by a spinlock.
 * Since this function is usually not called from a context where the
 * lock is held already, this function acquires the lock itself. A
 * non-locking version is also available.
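 *
 * For example, i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN, 0)
 * clears only the port B enable bit in PORT_HOTPLUG_EN and leaves every other
 * bit untouched.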
236 */ 237 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, 238 uint32_t mask, 239 uint32_t bits) 240 { 241 spin_lock_irq(&dev_priv->irq_lock); 242 i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); 243 spin_unlock_irq(&dev_priv->irq_lock); 244 } 245 246 /** 247 * ilk_update_display_irq - update DEIMR 248 * @dev_priv: driver private 249 * @interrupt_mask: mask of interrupt bits to update 250 * @enabled_irq_mask: mask of interrupt bits to enable 251 */ 252 void ilk_update_display_irq(struct drm_i915_private *dev_priv, 253 uint32_t interrupt_mask, 254 uint32_t enabled_irq_mask) 255 { 256 uint32_t new_val; 257 258 lockdep_assert_held(&dev_priv->irq_lock); 259 260 WARN_ON(enabled_irq_mask & ~interrupt_mask); 261 262 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 263 return; 264 265 new_val = dev_priv->irq_mask; 266 new_val &= ~interrupt_mask; 267 new_val |= (~enabled_irq_mask & interrupt_mask); 268 269 if (new_val != dev_priv->irq_mask) { 270 dev_priv->irq_mask = new_val; 271 I915_WRITE(DEIMR, dev_priv->irq_mask); 272 POSTING_READ(DEIMR); 273 } 274 } 275 276 /** 277 * ilk_update_gt_irq - update GTIMR 278 * @dev_priv: driver private 279 * @interrupt_mask: mask of interrupt bits to update 280 * @enabled_irq_mask: mask of interrupt bits to enable 281 */ 282 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv, 283 uint32_t interrupt_mask, 284 uint32_t enabled_irq_mask) 285 { 286 lockdep_assert_held(&dev_priv->irq_lock); 287 288 WARN_ON(enabled_irq_mask & ~interrupt_mask); 289 290 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 291 return; 292 293 dev_priv->gt_irq_mask &= ~interrupt_mask; 294 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 295 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 296 } 297 298 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 299 { 300 ilk_update_gt_irq(dev_priv, mask, mask); 301 POSTING_READ_FW(GTIMR); 302 } 303 304 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 305 { 306 ilk_update_gt_irq(dev_priv, mask, 0); 307 } 308 309 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) 310 { 311 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR; 312 } 313 314 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv) 315 { 316 return INTEL_GEN(dev_priv) >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR; 317 } 318 319 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv) 320 { 321 return INTEL_GEN(dev_priv) >= 8 ? 
						GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	lockdep_assert_held(&dev_priv->irq_lock);

	new_val = dev_priv->pm_imr;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_imr) {
		dev_priv->pm_imr = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_imr);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_unmask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_mask_pm_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_mask_pm_irq(dev_priv, mask);
}

static void gen6_reset_pm_iir(struct drm_i915_private *dev_priv, u32 reset_mask)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	lockdep_assert_held(&dev_priv->irq_lock);

	I915_WRITE(reg, reset_mask);
	I915_WRITE(reg, reset_mask);
	POSTING_READ(reg);
}

static void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, u32 enable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier |= enable_mask;
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	gen6_unmask_pm_irq(dev_priv, enable_mask);
	/* unmask_pm_irq provides an implicit barrier (POSTING_READ) */
}

static void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, u32 disable_mask)
{
	lockdep_assert_held(&dev_priv->irq_lock);

	dev_priv->pm_ier &= ~disable_mask;
	__gen6_mask_pm_irq(dev_priv, disable_mask);
	I915_WRITE(gen6_pm_ier(dev_priv), dev_priv->pm_ier);
	/* A barrier is missing here, but we don't really need one */
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	gen6_reset_pm_iir(dev_priv, dev_priv->pm_rps_events);
	dev_priv->gt_pm.rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(rps->pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	rps->interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (!READ_ONCE(rps->interrupts_enabled))
		return;

	spin_lock_irq(&dev_priv->irq_lock);
	rps->interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u));

	gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);

spin_unlock_irq(&dev_priv->irq_lock); 442 synchronize_irq(dev_priv->drm.irq); 443 444 /* Now that we will not be generating any more work, flush any 445 * outstanding tasks. As we are called on the RPS idle path, 446 * we will reset the GPU to minimum frequencies, so the current 447 * state of the worker can be discarded. 448 */ 449 cancel_work_sync(&rps->work); 450 gen6_reset_rps_interrupts(dev_priv); 451 } 452 453 void gen9_reset_guc_interrupts(struct drm_i915_private *dev_priv) 454 { 455 spin_lock_irq(&dev_priv->irq_lock); 456 gen6_reset_pm_iir(dev_priv, dev_priv->pm_guc_events); 457 spin_unlock_irq(&dev_priv->irq_lock); 458 } 459 460 void gen9_enable_guc_interrupts(struct drm_i915_private *dev_priv) 461 { 462 spin_lock_irq(&dev_priv->irq_lock); 463 if (!dev_priv->guc.interrupts_enabled) { 464 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & 465 dev_priv->pm_guc_events); 466 dev_priv->guc.interrupts_enabled = true; 467 gen6_enable_pm_irq(dev_priv, dev_priv->pm_guc_events); 468 } 469 spin_unlock_irq(&dev_priv->irq_lock); 470 } 471 472 void gen9_disable_guc_interrupts(struct drm_i915_private *dev_priv) 473 { 474 spin_lock_irq(&dev_priv->irq_lock); 475 dev_priv->guc.interrupts_enabled = false; 476 477 gen6_disable_pm_irq(dev_priv, dev_priv->pm_guc_events); 478 479 spin_unlock_irq(&dev_priv->irq_lock); 480 synchronize_irq(dev_priv->drm.irq); 481 482 gen9_reset_guc_interrupts(dev_priv); 483 } 484 485 /** 486 * bdw_update_port_irq - update DE port interrupt 487 * @dev_priv: driver private 488 * @interrupt_mask: mask of interrupt bits to update 489 * @enabled_irq_mask: mask of interrupt bits to enable 490 */ 491 static void bdw_update_port_irq(struct drm_i915_private *dev_priv, 492 uint32_t interrupt_mask, 493 uint32_t enabled_irq_mask) 494 { 495 uint32_t new_val; 496 uint32_t old_val; 497 498 lockdep_assert_held(&dev_priv->irq_lock); 499 500 WARN_ON(enabled_irq_mask & ~interrupt_mask); 501 502 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 503 return; 504 505 old_val = I915_READ(GEN8_DE_PORT_IMR); 506 507 new_val = old_val; 508 new_val &= ~interrupt_mask; 509 new_val |= (~enabled_irq_mask & interrupt_mask); 510 511 if (new_val != old_val) { 512 I915_WRITE(GEN8_DE_PORT_IMR, new_val); 513 POSTING_READ(GEN8_DE_PORT_IMR); 514 } 515 } 516 517 /** 518 * bdw_update_pipe_irq - update DE pipe interrupt 519 * @dev_priv: driver private 520 * @pipe: pipe whose interrupt to update 521 * @interrupt_mask: mask of interrupt bits to update 522 * @enabled_irq_mask: mask of interrupt bits to enable 523 */ 524 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, 525 enum i915_pipe pipe, 526 uint32_t interrupt_mask, 527 uint32_t enabled_irq_mask) 528 { 529 uint32_t new_val; 530 531 lockdep_assert_held(&dev_priv->irq_lock); 532 533 WARN_ON(enabled_irq_mask & ~interrupt_mask); 534 535 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 536 return; 537 538 new_val = dev_priv->de_irq_mask[pipe]; 539 new_val &= ~interrupt_mask; 540 new_val |= (~enabled_irq_mask & interrupt_mask); 541 542 if (new_val != dev_priv->de_irq_mask[pipe]) { 543 dev_priv->de_irq_mask[pipe] = new_val; 544 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 545 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 546 } 547 } 548 549 /** 550 * ibx_display_interrupt_update - update SDEIMR 551 * @dev_priv: driver private 552 * @interrupt_mask: mask of interrupt bits to update 553 * @enabled_irq_mask: mask of interrupt bits to enable 554 */ 555 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 556 uint32_t interrupt_mask, 557 uint32_t 
enabled_irq_mask) 558 { 559 uint32_t sdeimr = I915_READ(SDEIMR); 560 sdeimr &= ~interrupt_mask; 561 sdeimr |= (~enabled_irq_mask & interrupt_mask); 562 563 WARN_ON(enabled_irq_mask & ~interrupt_mask); 564 565 lockdep_assert_held(&dev_priv->irq_lock); 566 567 if (WARN_ON(!intel_irqs_enabled(dev_priv))) 568 return; 569 570 I915_WRITE(SDEIMR, sdeimr); 571 POSTING_READ(SDEIMR); 572 } 573 574 u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, 575 enum i915_pipe pipe) 576 { 577 u32 status_mask = dev_priv->pipestat_irq_mask[pipe]; 578 u32 enable_mask = status_mask << 16; 579 580 lockdep_assert_held(&dev_priv->irq_lock); 581 582 if (INTEL_GEN(dev_priv) < 5) 583 goto out; 584 585 /* 586 * On pipe A we don't support the PSR interrupt yet, 587 * on pipe B and C the same bit MBZ. 588 */ 589 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV)) 590 return 0; 591 /* 592 * On pipe B and C we don't support the PSR interrupt yet, on pipe 593 * A the same bit is for perf counters which we don't use either. 594 */ 595 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV)) 596 return 0; 597 598 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS | 599 SPRITE0_FLIP_DONE_INT_EN_VLV | 600 SPRITE1_FLIP_DONE_INT_EN_VLV); 601 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV) 602 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV; 603 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV) 604 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV; 605 606 out: 607 WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 608 status_mask & ~PIPESTAT_INT_STATUS_MASK, 609 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n", 610 pipe_name(pipe), enable_mask, status_mask); 611 612 return enable_mask; 613 } 614 615 void i915_enable_pipestat(struct drm_i915_private *dev_priv, 616 enum i915_pipe pipe, u32 status_mask) 617 { 618 i915_reg_t reg = PIPESTAT(pipe); 619 u32 enable_mask; 620 621 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 622 "pipe %c: status_mask=0x%x\n", 623 pipe_name(pipe), status_mask); 624 625 lockdep_assert_held(&dev_priv->irq_lock); 626 WARN_ON(!intel_irqs_enabled(dev_priv)); 627 628 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask) 629 return; 630 631 dev_priv->pipestat_irq_mask[pipe] |= status_mask; 632 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 633 634 I915_WRITE(reg, enable_mask | status_mask); 635 POSTING_READ(reg); 636 } 637 638 void i915_disable_pipestat(struct drm_i915_private *dev_priv, 639 enum i915_pipe pipe, u32 status_mask) 640 { 641 i915_reg_t reg = PIPESTAT(pipe); 642 u32 enable_mask; 643 644 WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK, 645 "pipe %c: status_mask=0x%x\n", 646 pipe_name(pipe), status_mask); 647 648 lockdep_assert_held(&dev_priv->irq_lock); 649 WARN_ON(!intel_irqs_enabled(dev_priv)); 650 651 if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0) 652 return; 653 654 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; 655 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 656 657 I915_WRITE(reg, enable_mask | status_mask); 658 POSTING_READ(reg); 659 } 660 661 /** 662 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 663 * @dev_priv: i915 device private 664 */ 665 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) 666 { 667 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv)) 668 return; 669 670 spin_lock_irq(&dev_priv->irq_lock); 671 672 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 673 if (INTEL_GEN(dev_priv) >= 4) 674 i915_enable_pipestat(dev_priv, PIPE_A, 675 
PIPE_LEGACY_BLC_EVENT_STATUS); 676 677 spin_unlock_irq(&dev_priv->irq_lock); 678 } 679 680 /* 681 * This timing diagram depicts the video signal in and 682 * around the vertical blanking period. 683 * 684 * Assumptions about the fictitious mode used in this example: 685 * vblank_start >= 3 686 * vsync_start = vblank_start + 1 687 * vsync_end = vblank_start + 2 688 * vtotal = vblank_start + 3 689 * 690 * start of vblank: 691 * latch double buffered registers 692 * increment frame counter (ctg+) 693 * generate start of vblank interrupt (gen4+) 694 * | 695 * | frame start: 696 * | generate frame start interrupt (aka. vblank interrupt) (gmch) 697 * | may be shifted forward 1-3 extra lines via PIPECONF 698 * | | 699 * | | start of vsync: 700 * | | generate vsync interrupt 701 * | | | 702 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx 703 * . \hs/ . \hs/ \hs/ \hs/ . \hs/ 704 * ----va---> <-----------------vb--------------------> <--------va------------- 705 * | | <----vs-----> | 706 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2) 707 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+) 708 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi) 709 * | | | 710 * last visible pixel first visible pixel 711 * | increment frame counter (gen3/4) 712 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4) 713 * 714 * x = horizontal active 715 * _ = horizontal blanking 716 * hs = horizontal sync 717 * va = vertical active 718 * vb = vertical blanking 719 * vs = vertical sync 720 * vbs = vblank_start (number) 721 * 722 * Summary: 723 * - most events happen at the start of horizontal sync 724 * - frame start happens at the start of horizontal blank, 1-4 lines 725 * (depending on PIPECONF settings) after the start of vblank 726 * - gen3/4 pixel and frame counter are synchronized with the start 727 * of horizontal active on the first line of vertical active 728 */ 729 730 /* Called from drm generic code, passed a 'crtc', which 731 * we use as a pipe index 732 */ 733 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 734 { 735 struct drm_i915_private *dev_priv = to_i915(dev); 736 i915_reg_t high_frame, low_frame; 737 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 738 const struct drm_display_mode *mode = &dev->vblank[pipe].hwmode; 739 unsigned long irqflags; 740 741 htotal = mode->crtc_htotal; 742 hsync_start = mode->crtc_hsync_start; 743 vbl_start = mode->crtc_vblank_start; 744 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 745 vbl_start = DIV_ROUND_UP(vbl_start, 2); 746 747 /* Convert to pixel count */ 748 vbl_start *= htotal; 749 750 /* Start of vblank event occurs at start of hsync */ 751 vbl_start -= htotal - hsync_start; 752 753 high_frame = PIPEFRAME(pipe); 754 low_frame = PIPEFRAMEPIXEL(pipe); 755 756 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 757 758 /* 759 * High & low register fields aren't synchronized, so make sure 760 * we get a low value that's stable across two reads of the high 761 * register. 
 */
	do {
		high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ_FW(low_frame);
		high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/*
 * On certain encoders on certain platforms, the pipe scanline register
 * will not work to get the scanline, since the timings are driven from
 * the PORT or there are issues with the scanline register updates.
 * This function will use the Framestamp and current timestamp registers
 * to calculate the scanline.
 */
static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_vblank_crtc *vblank =
		&crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	const struct drm_display_mode *mode = &vblank->hwmode;
	u32 vblank_start = mode->crtc_vblank_start;
	u32 vtotal = mode->crtc_vtotal;
	u32 htotal = mode->crtc_htotal;
	u32 clock = mode->crtc_clock;
	u32 scanline, scan_prev_time, scan_curr_time, scan_post_time;

	/*
	 * To avoid the race condition where we might cross into the
	 * next vblank just between the PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * reads, we make sure we read PIPE_FRMTMSTMP and TIMESTAMP_CTR
	 * during the same frame.
	 */
	do {
		/*
		 * This field provides read back of the display
		 * pipe frame time stamp. The time stamp value
		 * is sampled at every start of vertical blank.
		 */
		scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));

		/*
		 * The TIMESTAMP_CTR register has the current
		 * time stamp value.
		 */
		scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);

		scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
	} while (scan_post_time != scan_prev_time);

	scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
					clock), 1000 * htotal);
	scanline = min(scanline, vtotal - 1);
	scanline = (scanline + vblank_start) % vtotal;

	return scanline;
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc.
 */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode;
	struct drm_vblank_crtc *vblank;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	if (!crtc->active)
		return -1;

	vblank = &crtc->base.dev->vblank[drm_crtc_index(&crtc->base)];
	mode = &vblank->hwmode;

	if (mode->private_flags & I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP)
		return __intel_get_crtc_scanline_from_timestamp(crtc);

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				     bool in_vblank_irq, int *vpos, int *hpos,
				     ktime_t *stime, ktime_t *etime,
				     const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv,
								pipe);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return false;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
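	 *
	 * For example, with vbl_start=4 and vbl_end=vtotal=6, scanlines 4
	 * and 5 map to -2 and -1, while scanlines 0..3 map to 0..3.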
998 */ 999 if (position >= vbl_start) 1000 position -= vbl_end; 1001 else 1002 position += vtotal - vbl_end; 1003 1004 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) { 1005 *vpos = position; 1006 *hpos = 0; 1007 } else { 1008 *vpos = position / htotal; 1009 *hpos = position - (*vpos * htotal); 1010 } 1011 1012 return true; 1013 } 1014 1015 int intel_get_crtc_scanline(struct intel_crtc *crtc) 1016 { 1017 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 1018 unsigned long irqflags; 1019 int position; 1020 1021 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 1022 position = __intel_get_crtc_scanline(crtc); 1023 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 1024 1025 return position; 1026 } 1027 1028 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) 1029 { 1030 u32 busy_up, busy_down, max_avg, min_avg; 1031 u8 new_delay; 1032 1033 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 1034 1035 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 1036 1037 new_delay = dev_priv->ips.cur_delay; 1038 1039 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 1040 busy_up = I915_READ(RCPREVBSYTUPAVG); 1041 busy_down = I915_READ(RCPREVBSYTDNAVG); 1042 max_avg = I915_READ(RCBMAXAVG); 1043 min_avg = I915_READ(RCBMINAVG); 1044 1045 /* Handle RCS change request from hw */ 1046 if (busy_up > max_avg) { 1047 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 1048 new_delay = dev_priv->ips.cur_delay - 1; 1049 if (new_delay < dev_priv->ips.max_delay) 1050 new_delay = dev_priv->ips.max_delay; 1051 } else if (busy_down < min_avg) { 1052 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 1053 new_delay = dev_priv->ips.cur_delay + 1; 1054 if (new_delay > dev_priv->ips.min_delay) 1055 new_delay = dev_priv->ips.min_delay; 1056 } 1057 1058 if (ironlake_set_drps(dev_priv, new_delay)) 1059 dev_priv->ips.cur_delay = new_delay; 1060 1061 lockmgr(&mchdev_lock, LK_RELEASE); 1062 1063 return; 1064 } 1065 1066 static void notify_ring(struct intel_engine_cs *engine) 1067 { 1068 struct drm_i915_gem_request *rq = NULL; 1069 struct intel_wait *wait; 1070 1071 atomic_inc(&engine->irq_count); 1072 set_bit(ENGINE_IRQ_BREADCRUMB, &engine->irq_posted); 1073 1074 lockmgr(&engine->breadcrumbs.irq_lock, LK_EXCLUSIVE); 1075 wait = engine->breadcrumbs.irq_wait; 1076 if (wait) { 1077 bool wakeup = engine->irq_seqno_barrier; 1078 1079 /* We use a callback from the dma-fence to submit 1080 * requests after waiting on our own requests. To 1081 * ensure minimum delay in queuing the next request to 1082 * hardware, signal the fence now rather than wait for 1083 * the signaler to be woken up. We still wake up the 1084 * waiter in order to handle the irq-seqno coherency 1085 * issues (we may receive the interrupt before the 1086 * seqno is written, see __i915_request_irq_complete()) 1087 * and to handle coalescing of multiple seqno updates 1088 * and many waiters. 
1089 */ 1090 if (i915_seqno_passed(intel_engine_get_seqno(engine), 1091 wait->seqno)) { 1092 struct drm_i915_gem_request *waiter = wait->request; 1093 1094 wakeup = true; 1095 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, 1096 &waiter->fence.flags) && 1097 intel_wait_check_request(wait, waiter)) 1098 rq = i915_gem_request_get(waiter); 1099 } 1100 1101 if (wakeup) 1102 wake_up_process(wait->tsk); 1103 } else { 1104 __intel_engine_disarm_breadcrumbs(engine); 1105 } 1106 lockmgr(&engine->breadcrumbs.irq_lock, LK_RELEASE); 1107 1108 if (rq) { 1109 dma_fence_signal(&rq->fence); 1110 i915_gem_request_put(rq); 1111 } 1112 1113 trace_intel_engine_notify(engine, wait); 1114 } 1115 1116 static void vlv_c0_read(struct drm_i915_private *dev_priv, 1117 struct intel_rps_ei *ei) 1118 { 1119 ei->ktime = ktime_get_raw(); 1120 ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); 1121 ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); 1122 } 1123 1124 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) 1125 { 1126 memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); 1127 } 1128 1129 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) 1130 { 1131 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1132 const struct intel_rps_ei *prev = &rps->ei; 1133 struct intel_rps_ei now; 1134 u32 events = 0; 1135 1136 if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) 1137 return 0; 1138 1139 vlv_c0_read(dev_priv, &now); 1140 1141 if (prev->ktime) { 1142 u64 time, c0; 1143 u32 render, media; 1144 1145 time = ktime_us_delta(now.ktime, prev->ktime); 1146 1147 time *= dev_priv->czclk_freq; 1148 1149 /* Workload can be split between render + media, 1150 * e.g. SwapBuffers being blitted in X after being rendered in 1151 * mesa. To account for this we need to combine both engines 1152 * into our activity counter. 1153 */ 1154 render = now.render_c0 - prev->render_c0; 1155 media = now.media_c0 - prev->media_c0; 1156 c0 = max(render, media); 1157 c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ 1158 1159 if (c0 > time * rps->up_threshold) 1160 events = GEN6_PM_RP_UP_THRESHOLD; 1161 else if (c0 < time * rps->down_threshold) 1162 events = GEN6_PM_RP_DOWN_THRESHOLD; 1163 } 1164 1165 rps->ei = now; 1166 return events; 1167 } 1168 1169 static void gen6_pm_rps_work(struct work_struct *work) 1170 { 1171 struct drm_i915_private *dev_priv = 1172 container_of(work, struct drm_i915_private, gt_pm.rps.work); 1173 struct intel_rps *rps = &dev_priv->gt_pm.rps; 1174 bool client_boost = false; 1175 int new_delay, adj, min, max; 1176 u32 pm_iir = 0; 1177 1178 spin_lock_irq(&dev_priv->irq_lock); 1179 if (rps->interrupts_enabled) { 1180 pm_iir = fetch_and_zero(&rps->pm_iir); 1181 client_boost = atomic_read(&rps->num_waiters); 1182 } 1183 spin_unlock_irq(&dev_priv->irq_lock); 1184 1185 /* Make sure we didn't queue anything we're not going to process. */ 1186 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1187 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1188 goto out; 1189 1190 mutex_lock(&dev_priv->pcu_lock); 1191 1192 pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); 1193 1194 adj = rps->last_adj; 1195 new_delay = rps->cur_freq; 1196 min = rps->min_freq_softlimit; 1197 max = rps->max_freq_softlimit; 1198 if (client_boost) 1199 max = rps->max_freq; 1200 if (client_boost && new_delay < rps->boost_freq) { 1201 new_delay = rps->boost_freq; 1202 adj = 0; 1203 } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1204 if (adj > 0) 1205 adj *= 2; 1206 else /* CHV needs even encode values */ 1207 adj = IS_CHERRYVIEW(dev_priv) ? 
									2 : 1;

		if (new_delay >= rps->max_freq_softlimit)
			adj = 0;
	} else if (client_boost) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (rps->cur_freq > rps->efficient_freq)
			new_delay = rps->efficient_freq;
		else if (rps->cur_freq > rps->min_freq_softlimit)
			new_delay = rps->min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;

		if (new_delay <= rps->min_freq_softlimit)
			adj = 0;
	} else { /* unknown event */
		adj = 0;
	}

	rps->last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	if (intel_set_rps(dev_priv, new_delay)) {
		DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n");
		rps->last_adj = 0;
	}

	mutex_unlock(&dev_priv->pcu_lock);

out:
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (rps->interrupts_enabled)
		gen6_unmask_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
1276 */ 1277 mutex_lock(&dev_priv->drm.struct_mutex); 1278 1279 /* If we've screwed up tracking, just let the interrupt fire again */ 1280 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1281 goto out; 1282 1283 misccpctl = I915_READ(GEN7_MISCCPCTL); 1284 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); 1285 POSTING_READ(GEN7_MISCCPCTL); 1286 1287 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { 1288 i915_reg_t reg; 1289 1290 slice--; 1291 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv))) 1292 break; 1293 1294 dev_priv->l3_parity.which_slice &= ~(1<<slice); 1295 1296 reg = GEN7_L3CDERRST1(slice); 1297 1298 error_status = I915_READ(reg); 1299 row = GEN7_PARITY_ERROR_ROW(error_status); 1300 bank = GEN7_PARITY_ERROR_BANK(error_status); 1301 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); 1302 1303 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); 1304 POSTING_READ(reg); 1305 1306 parity_event[0] = I915_L3_PARITY_UEVENT "=1"; 1307 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); 1308 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); 1309 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); 1310 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1311 parity_event[5] = NULL; 1312 1313 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj, 1314 KOBJ_CHANGE, parity_event); 1315 1316 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1317 slice, row, bank, subbank); 1318 1319 kfree(parity_event[4]); 1320 kfree(parity_event[3]); 1321 kfree(parity_event[2]); 1322 kfree(parity_event[1]); 1323 } 1324 1325 I915_WRITE(GEN7_MISCCPCTL, misccpctl); 1326 1327 out: 1328 WARN_ON(dev_priv->l3_parity.which_slice); 1329 spin_lock_irq(&dev_priv->irq_lock); 1330 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1331 spin_unlock_irq(&dev_priv->irq_lock); 1332 1333 mutex_unlock(&dev_priv->drm.struct_mutex); 1334 } 1335 1336 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1337 u32 iir) 1338 { 1339 if (!HAS_L3_DPF(dev_priv)) 1340 return; 1341 1342 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1343 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1344 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1345 1346 iir &= GT_PARITY_ERROR(dev_priv); 1347 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1) 1348 dev_priv->l3_parity.which_slice |= 1 << 1; 1349 1350 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) 1351 dev_priv->l3_parity.which_slice |= 1 << 0; 1352 1353 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); 1354 } 1355 1356 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1357 u32 gt_iir) 1358 { 1359 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1360 notify_ring(dev_priv->engine[RCS]); 1361 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1362 notify_ring(dev_priv->engine[VCS]); 1363 } 1364 1365 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1366 u32 gt_iir) 1367 { 1368 if (gt_iir & GT_RENDER_USER_INTERRUPT) 1369 notify_ring(dev_priv->engine[RCS]); 1370 if (gt_iir & GT_BSD_USER_INTERRUPT) 1371 notify_ring(dev_priv->engine[VCS]); 1372 if (gt_iir & GT_BLT_USER_INTERRUPT) 1373 notify_ring(dev_priv->engine[BCS]); 1374 1375 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1376 GT_BSD_CS_ERROR_INTERRUPT | 1377 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) 1378 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir); 1379 1380 if (gt_iir & GT_PARITY_ERROR(dev_priv)) 1381 ivybridge_parity_error_irq_handler(dev_priv, gt_iir); 1382 } 1383 1384 static void 1385 
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift) 1386 { 1387 struct intel_engine_execlists * const execlists = &engine->execlists; 1388 bool tasklet = false; 1389 1390 if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift)) { 1391 if (READ_ONCE(engine->execlists.active)) { 1392 __set_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted); 1393 tasklet = true; 1394 } 1395 } 1396 1397 if (iir & (GT_RENDER_USER_INTERRUPT << test_shift)) { 1398 notify_ring(engine); 1399 tasklet |= i915_modparams.enable_guc_submission; 1400 } 1401 1402 if (tasklet) 1403 tasklet_hi_schedule(&execlists->irq_tasklet); 1404 } 1405 1406 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1407 u32 master_ctl, 1408 u32 gt_iir[4]) 1409 { 1410 irqreturn_t ret = IRQ_NONE; 1411 1412 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1413 gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1414 if (gt_iir[0]) { 1415 I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1416 ret = IRQ_HANDLED; 1417 } else 1418 DRM_ERROR("The master control interrupt lied (GT0)!\n"); 1419 } 1420 1421 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1422 gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1423 if (gt_iir[1]) { 1424 I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1425 ret = IRQ_HANDLED; 1426 } else 1427 DRM_ERROR("The master control interrupt lied (GT1)!\n"); 1428 } 1429 1430 if (master_ctl & GEN8_GT_VECS_IRQ) { 1431 gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1432 if (gt_iir[3]) { 1433 I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1434 ret = IRQ_HANDLED; 1435 } else 1436 DRM_ERROR("The master control interrupt lied (GT3)!\n"); 1437 } 1438 1439 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1440 gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1441 if (gt_iir[2] & (dev_priv->pm_rps_events | 1442 dev_priv->pm_guc_events)) { 1443 I915_WRITE_FW(GEN8_GT_IIR(2), 1444 gt_iir[2] & (dev_priv->pm_rps_events | 1445 dev_priv->pm_guc_events)); 1446 ret = IRQ_HANDLED; 1447 } else 1448 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1449 } 1450 1451 return ret; 1452 } 1453 1454 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1455 u32 gt_iir[4]) 1456 { 1457 if (gt_iir[0]) { 1458 gen8_cs_irq_handler(dev_priv->engine[RCS], 1459 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1460 gen8_cs_irq_handler(dev_priv->engine[BCS], 1461 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1462 } 1463 1464 if (gt_iir[1]) { 1465 gen8_cs_irq_handler(dev_priv->engine[VCS], 1466 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1467 gen8_cs_irq_handler(dev_priv->engine[VCS2], 1468 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1469 } 1470 1471 if (gt_iir[3]) 1472 gen8_cs_irq_handler(dev_priv->engine[VECS], 1473 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1474 1475 if (gt_iir[2] & dev_priv->pm_rps_events) 1476 gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1477 1478 if (gt_iir[2] & dev_priv->pm_guc_events) 1479 gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1480 } 1481 1482 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) 1483 { 1484 switch (port) { 1485 case PORT_A: 1486 return val & PORTA_HOTPLUG_LONG_DETECT; 1487 case PORT_B: 1488 return val & PORTB_HOTPLUG_LONG_DETECT; 1489 case PORT_C: 1490 return val & PORTC_HOTPLUG_LONG_DETECT; 1491 default: 1492 return false; 1493 } 1494 } 1495 1496 static bool spt_port_hotplug2_long_detect(enum port port, u32 val) 1497 { 1498 switch (port) { 1499 case PORT_E: 1500 return val & PORTE_HOTPLUG_LONG_DETECT; 1501 default: 1502 return false; 1503 } 1504 } 1505 1506 static bool spt_port_hotplug_long_detect(enum port port, u32 val) 1507 { 1508 switch (port) { 
1509 case PORT_A: 1510 return val & PORTA_HOTPLUG_LONG_DETECT; 1511 case PORT_B: 1512 return val & PORTB_HOTPLUG_LONG_DETECT; 1513 case PORT_C: 1514 return val & PORTC_HOTPLUG_LONG_DETECT; 1515 case PORT_D: 1516 return val & PORTD_HOTPLUG_LONG_DETECT; 1517 default: 1518 return false; 1519 } 1520 } 1521 1522 static bool ilk_port_hotplug_long_detect(enum port port, u32 val) 1523 { 1524 switch (port) { 1525 case PORT_A: 1526 return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT; 1527 default: 1528 return false; 1529 } 1530 } 1531 1532 static bool pch_port_hotplug_long_detect(enum port port, u32 val) 1533 { 1534 switch (port) { 1535 case PORT_B: 1536 return val & PORTB_HOTPLUG_LONG_DETECT; 1537 case PORT_C: 1538 return val & PORTC_HOTPLUG_LONG_DETECT; 1539 case PORT_D: 1540 return val & PORTD_HOTPLUG_LONG_DETECT; 1541 default: 1542 return false; 1543 } 1544 } 1545 1546 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val) 1547 { 1548 switch (port) { 1549 case PORT_B: 1550 return val & PORTB_HOTPLUG_INT_LONG_PULSE; 1551 case PORT_C: 1552 return val & PORTC_HOTPLUG_INT_LONG_PULSE; 1553 case PORT_D: 1554 return val & PORTD_HOTPLUG_INT_LONG_PULSE; 1555 default: 1556 return false; 1557 } 1558 } 1559 1560 /* 1561 * Get a bit mask of pins that have triggered, and which ones may be long. 1562 * This can be called multiple times with the same masks to accumulate 1563 * hotplug detection results from several registers. 1564 * 1565 * Note that the caller is expected to zero out the masks initially. 1566 */ 1567 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask, 1568 u32 hotplug_trigger, u32 dig_hotplug_reg, 1569 const u32 hpd[HPD_NUM_PINS], 1570 bool long_pulse_detect(enum port port, u32 val)) 1571 { 1572 enum port port; 1573 int i; 1574 1575 for_each_hpd_pin(i) { 1576 if ((hpd[i] & hotplug_trigger) == 0) 1577 continue; 1578 1579 *pin_mask |= BIT(i); 1580 1581 port = intel_hpd_pin_to_port(i); 1582 if (port == PORT_NONE) 1583 continue; 1584 1585 if (long_pulse_detect(port, dig_hotplug_reg)) 1586 *long_mask |= BIT(i); 1587 } 1588 1589 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n", 1590 hotplug_trigger, dig_hotplug_reg, *pin_mask); 1591 1592 } 1593 1594 static void gmbus_irq_handler(struct drm_i915_private *dev_priv) 1595 { 1596 wake_up_all(&dev_priv->gmbus_wait_queue); 1597 } 1598 1599 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv) 1600 { 1601 wake_up_all(&dev_priv->gmbus_wait_queue); 1602 } 1603 1604 #if defined(CONFIG_DEBUG_FS) 1605 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1606 enum i915_pipe pipe, 1607 uint32_t crc0, uint32_t crc1, 1608 uint32_t crc2, uint32_t crc3, 1609 uint32_t crc4) 1610 { 1611 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1612 struct intel_pipe_crc_entry *entry; 1613 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); 1614 struct drm_driver *driver = dev_priv->drm.driver; 1615 uint32_t crcs[5]; 1616 int head, tail; 1617 1618 lockmgr(&pipe_crc->lock, LK_EXCLUSIVE); 1619 if (pipe_crc->source) { 1620 if (!pipe_crc->entries) { 1621 lockmgr(&pipe_crc->lock, LK_RELEASE); 1622 DRM_DEBUG_KMS("spurious interrupt\n"); 1623 return; 1624 } 1625 1626 head = pipe_crc->head; 1627 tail = pipe_crc->tail; 1628 1629 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { 1630 lockmgr(&pipe_crc->lock, LK_RELEASE); 1631 DRM_ERROR("CRC buffer overflowing\n"); 1632 return; 1633 } 1634 1635 entry = &pipe_crc->entries[head]; 1636 1637 entry->frame = 
driver->get_vblank_counter(&dev_priv->drm, pipe); 1638 entry->crc[0] = crc0; 1639 entry->crc[1] = crc1; 1640 entry->crc[2] = crc2; 1641 entry->crc[3] = crc3; 1642 entry->crc[4] = crc4; 1643 1644 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); 1645 pipe_crc->head = head; 1646 1647 lockmgr(&pipe_crc->lock, LK_RELEASE); 1648 1649 wake_up_interruptible(&pipe_crc->wq); 1650 } else { 1651 /* 1652 * For some not yet identified reason, the first CRC is 1653 * bonkers. So let's just wait for the next vblank and read 1654 * out the buggy result. 1655 * 1656 * On GEN8+ sometimes the second CRC is bonkers as well, so 1657 * don't trust that one either. 1658 */ 1659 if (pipe_crc->skipped == 0 || 1660 (INTEL_GEN(dev_priv) >= 8 && pipe_crc->skipped == 1)) { 1661 pipe_crc->skipped++; 1662 lockmgr(&pipe_crc->lock, LK_RELEASE); 1663 return; 1664 } 1665 lockmgr(&pipe_crc->lock, LK_RELEASE); 1666 crcs[0] = crc0; 1667 crcs[1] = crc1; 1668 crcs[2] = crc2; 1669 crcs[3] = crc3; 1670 crcs[4] = crc4; 1671 drm_crtc_add_crc_entry(&crtc->base, true, 1672 drm_crtc_accurate_vblank_count(&crtc->base), 1673 crcs); 1674 } 1675 } 1676 #else 1677 static inline void 1678 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1679 enum i915_pipe pipe, 1680 uint32_t crc0, uint32_t crc1, 1681 uint32_t crc2, uint32_t crc3, 1682 uint32_t crc4) {} 1683 #endif 1684 1685 1686 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1687 enum i915_pipe pipe) 1688 { 1689 display_pipe_crc_irq_handler(dev_priv, pipe, 1690 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1691 0, 0, 0, 0); 1692 } 1693 1694 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1695 enum i915_pipe pipe) 1696 { 1697 display_pipe_crc_irq_handler(dev_priv, pipe, 1698 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1699 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1700 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1701 I915_READ(PIPE_CRC_RES_4_IVB(pipe)), 1702 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1703 } 1704 1705 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, 1706 enum i915_pipe pipe) 1707 { 1708 uint32_t res1, res2; 1709 1710 if (INTEL_GEN(dev_priv) >= 3) 1711 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1712 else 1713 res1 = 0; 1714 1715 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) 1716 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1717 else 1718 res2 = 0; 1719 1720 display_pipe_crc_irq_handler(dev_priv, pipe, 1721 I915_READ(PIPE_CRC_RES_RED(pipe)), 1722 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1723 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1724 res1, res2); 1725 } 1726 1727 /* The RPS events need forcewake, so we add them to a work queue and mask their 1728 * IMR bits until the work is done. Other interrupts can be processed without 1729 * the work queue. 
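 * The bottom half for the RPS events is gen6_pm_rps_work() above, which runs
 * from rps->work once the IIR bits have been latched into rps->pm_iir.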
 */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps *rps = &dev_priv->gt_pm.rps;

	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		gen6_mask_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (rps->interrupts_enabled) {
			rps->pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&rps->work);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	}

	if (INTEL_GEN(dev_priv) >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static void gen9_guc_irq_handler(struct drm_i915_private *dev_priv, u32 gt_iir)
{
	if (gt_iir & GEN9_GUC_TO_HOST_INT_EVENT) {
		/* Sample the log buffer flush related bits & clear them out
		 * right away from the message identity register to minimize
		 * the probability of losing a flush interrupt when there are
		 * back to back flush interrupts.
		 * There can be a new flush interrupt, for a different log
		 * buffer type (like for ISR), whilst the Host is handling
		 * one (for DPC). Since the same bit is used in the message
		 * register for ISR & DPC, it could happen that the GuC sets
		 * the bit for the 2nd interrupt but the Host clears out the
		 * bit while handling the 1st interrupt.
		 */
		u32 msg, flush;

		msg = I915_READ(SOFT_SCRATCH(15));
		flush = msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED |
			       INTEL_GUC_RECV_MSG_FLUSH_LOG_BUFFER);
		if (flush) {
			/* Clear the message bits that are handled */
			I915_WRITE(SOFT_SCRATCH(15), msg & ~flush);

			/* Handle flush interrupt in bottom half */
			queue_work(dev_priv->guc.log.runtime.flush_wq,
				   &dev_priv->guc.log.runtime.flush_work);

			dev_priv->guc.log.flush_interrupt_count++;
		} else {
			/* Leaving the unhandled event bits set won't cause
			 * the interrupt to be re-triggered.
			 */
		}
	}
}

static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv)
{
	enum i915_pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(PIPESTAT(pipe),
			   PIPESTAT_INT_STATUS_MASK |
			   PIPE_FIFO_UNDERRUN_STATUS);

		dev_priv->pipestat_irq_mask[pipe] = 0;
	}
}

static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
				  u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	if (!dev_priv->display_irqs_enabled) {
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 status_mask, enable_mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* FIFO underruns are filtered in the underrun handler.
*/ 1829 status_mask = PIPE_FIFO_UNDERRUN_STATUS; 1830 1831 switch (pipe) { 1832 case PIPE_A: 1833 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; 1834 break; 1835 case PIPE_B: 1836 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 1837 break; 1838 case PIPE_C: 1839 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 1840 break; 1841 } 1842 if (iir & iir_bit) 1843 status_mask |= dev_priv->pipestat_irq_mask[pipe]; 1844 1845 if (!status_mask) 1846 continue; 1847 1848 reg = PIPESTAT(pipe); 1849 pipe_stats[pipe] = I915_READ(reg) & status_mask; 1850 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); 1851 1852 /* 1853 * Clear the PIPE*STAT regs before the IIR 1854 */ 1855 if (pipe_stats[pipe]) 1856 I915_WRITE(reg, enable_mask | pipe_stats[pipe]); 1857 } 1858 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1859 } 1860 1861 static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1862 u16 iir, u32 pipe_stats[I915_MAX_PIPES]) 1863 { 1864 enum i915_pipe pipe; 1865 1866 for_each_pipe(dev_priv, pipe) { 1867 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1868 drm_handle_vblank(&dev_priv->drm, pipe); 1869 1870 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1871 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1872 1873 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1874 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1875 } 1876 } 1877 1878 static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1879 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1880 { 1881 bool blc_event = false; 1882 enum i915_pipe pipe; 1883 1884 for_each_pipe(dev_priv, pipe) { 1885 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) 1886 drm_handle_vblank(&dev_priv->drm, pipe); 1887 1888 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1889 blc_event = true; 1890 1891 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1892 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1893 1894 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1895 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1896 } 1897 1898 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1899 intel_opregion_asle_intr(dev_priv); 1900 } 1901 1902 static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1903 u32 iir, u32 pipe_stats[I915_MAX_PIPES]) 1904 { 1905 bool blc_event = false; 1906 enum i915_pipe pipe; 1907 1908 for_each_pipe(dev_priv, pipe) { 1909 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1910 drm_handle_vblank(&dev_priv->drm, pipe); 1911 1912 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 1913 blc_event = true; 1914 1915 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1916 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1917 1918 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1919 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1920 } 1921 1922 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 1923 intel_opregion_asle_intr(dev_priv); 1924 1925 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1926 gmbus_irq_handler(dev_priv); 1927 } 1928 1929 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, 1930 u32 pipe_stats[I915_MAX_PIPES]) 1931 { 1932 enum i915_pipe pipe; 1933 1934 for_each_pipe(dev_priv, pipe) { 1935 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1936 drm_handle_vblank(&dev_priv->drm, pipe); 1937 1938 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1939 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 1940 1941 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1942 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1943 } 1944 1945 if 
(pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1946 gmbus_irq_handler(dev_priv); 1947 } 1948 1949 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1950 { 1951 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1952 1953 if (hotplug_status) 1954 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1955 1956 return hotplug_status; 1957 } 1958 1959 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, 1960 u32 hotplug_status) 1961 { 1962 u32 pin_mask = 0, long_mask = 0; 1963 1964 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || 1965 IS_CHERRYVIEW(dev_priv)) { 1966 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1967 1968 if (hotplug_trigger) { 1969 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1970 hotplug_trigger, hpd_status_g4x, 1971 i9xx_port_hotplug_long_detect); 1972 1973 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1974 } 1975 1976 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1977 dp_aux_irq_handler(dev_priv); 1978 } else { 1979 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1980 1981 if (hotplug_trigger) { 1982 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1983 hotplug_trigger, hpd_status_i915, 1984 i9xx_port_hotplug_long_detect); 1985 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 1986 } 1987 } 1988 } 1989 1990 static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1991 { 1992 struct drm_device *dev = arg; 1993 struct drm_i915_private *dev_priv = to_i915(dev); 1994 irqreturn_t ret = IRQ_NONE; 1995 1996 if (!intel_irqs_enabled(dev_priv)) 1997 return IRQ_NONE; 1998 1999 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2000 disable_rpm_wakeref_asserts(dev_priv); 2001 2002 do { 2003 u32 iir, gt_iir, pm_iir; 2004 u32 pipe_stats[I915_MAX_PIPES] = {}; 2005 u32 hotplug_status = 0; 2006 u32 ier = 0; 2007 2008 gt_iir = I915_READ(GTIIR); 2009 pm_iir = I915_READ(GEN6_PMIIR); 2010 iir = I915_READ(VLV_IIR); 2011 2012 if (gt_iir == 0 && pm_iir == 0 && iir == 0) 2013 break; 2014 2015 ret = IRQ_HANDLED; 2016 2017 /* 2018 * Theory on interrupt generation, based on empirical evidence: 2019 * 2020 * x = ((VLV_IIR & VLV_IER) || 2021 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) && 2022 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE))); 2023 * 2024 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2025 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to 2026 * guarantee the CPU interrupt will be raised again even if we 2027 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR 2028 * bits this time around. 2029 */ 2030 I915_WRITE(VLV_MASTER_IER, 0); 2031 ier = I915_READ(VLV_IER); 2032 I915_WRITE(VLV_IER, 0); 2033 2034 if (gt_iir) 2035 I915_WRITE(GTIIR, gt_iir); 2036 if (pm_iir) 2037 I915_WRITE(GEN6_PMIIR, pm_iir); 2038 2039 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2040 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2041 2042 /* Call regardless, as some status bits might not be 2043 * signalled in iir */ 2044 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2045 2046 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2047 I915_LPE_PIPE_B_INTERRUPT)) 2048 intel_lpe_audio_irq_handler(dev_priv); 2049 2050 /* 2051 * VLV_IIR is single buffered, and reflects the level 2052 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 
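* If we cleared VLV_IIR before the underlying PIPESTAT/PORT_HOTPLUG_STAT bits, the still-asserted level would simply leave the IIR bits set, which is why those sources are acked first above.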
2053 */ 2054 if (iir) 2055 I915_WRITE(VLV_IIR, iir); 2056 2057 I915_WRITE(VLV_IER, ier); 2058 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2059 POSTING_READ(VLV_MASTER_IER); 2060 2061 if (gt_iir) 2062 snb_gt_irq_handler(dev_priv, gt_iir); 2063 if (pm_iir) 2064 gen6_rps_irq_handler(dev_priv, pm_iir); 2065 2066 if (hotplug_status) 2067 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2068 2069 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2070 } while (0); 2071 2072 enable_rpm_wakeref_asserts(dev_priv); 2073 2074 return ret; 2075 } 2076 2077 static irqreturn_t cherryview_irq_handler(int irq, void *arg) 2078 { 2079 struct drm_device *dev = arg; 2080 struct drm_i915_private *dev_priv = to_i915(dev); 2081 irqreturn_t ret = IRQ_NONE; 2082 2083 if (!intel_irqs_enabled(dev_priv)) 2084 return IRQ_NONE; 2085 2086 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2087 disable_rpm_wakeref_asserts(dev_priv); 2088 2089 do { 2090 u32 master_ctl, iir; 2091 u32 gt_iir[4] = {}; 2092 u32 pipe_stats[I915_MAX_PIPES] = {}; 2093 u32 hotplug_status = 0; 2094 u32 ier = 0; 2095 2096 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 2097 iir = I915_READ(VLV_IIR); 2098 2099 if (master_ctl == 0 && iir == 0) 2100 break; 2101 2102 ret = IRQ_HANDLED; 2103 2104 /* 2105 * Theory on interrupt generation, based on empirical evidence: 2106 * 2107 * x = ((VLV_IIR & VLV_IER) || 2108 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) && 2109 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL))); 2110 * 2111 * A CPU interrupt will only be raised when 'x' has a 0->1 edge. 2112 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to 2113 * guarantee the CPU interrupt will be raised again even if we 2114 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL 2115 * bits this time around. 2116 */ 2117 I915_WRITE(GEN8_MASTER_IRQ, 0); 2118 ier = I915_READ(VLV_IER); 2119 I915_WRITE(VLV_IER, 0); 2120 2121 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2122 2123 if (iir & I915_DISPLAY_PORT_INTERRUPT) 2124 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 2125 2126 /* Call regardless, as some status bits might not be 2127 * signalled in iir */ 2128 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 2129 2130 if (iir & (I915_LPE_PIPE_A_INTERRUPT | 2131 I915_LPE_PIPE_B_INTERRUPT | 2132 I915_LPE_PIPE_C_INTERRUPT)) 2133 intel_lpe_audio_irq_handler(dev_priv); 2134 2135 /* 2136 * VLV_IIR is single buffered, and reflects the level 2137 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. 2138 */ 2139 if (iir) 2140 I915_WRITE(VLV_IIR, iir); 2141 2142 I915_WRITE(VLV_IER, ier); 2143 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2144 POSTING_READ(GEN8_MASTER_IRQ); 2145 2146 gen8_gt_irq_handler(dev_priv, gt_iir); 2147 2148 if (hotplug_status) 2149 i9xx_hpd_irq_handler(dev_priv, hotplug_status); 2150 2151 valleyview_pipestat_irq_handler(dev_priv, pipe_stats); 2152 } while (0); 2153 2154 enable_rpm_wakeref_asserts(dev_priv); 2155 2156 return ret; 2157 } 2158 2159 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, 2160 u32 hotplug_trigger, 2161 const u32 hpd[HPD_NUM_PINS]) 2162 { 2163 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2164 2165 /* 2166 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU 2167 * unless we touch the hotplug register, even if hotplug_trigger is 2168 * zero. Not acking leads to "The master control interrupt lied (SDE)!" 2169 * errors. 
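* When hotplug_trigger is zero we still go through the read/write cycle below, but mask out the sticky per-port status bits first so the dummy write does not clear a latched hotplug event that has not been processed yet.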
2170 */ 2171 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2172 if (!hotplug_trigger) { 2173 u32 mask = PORTA_HOTPLUG_STATUS_MASK | 2174 PORTD_HOTPLUG_STATUS_MASK | 2175 PORTC_HOTPLUG_STATUS_MASK | 2176 PORTB_HOTPLUG_STATUS_MASK; 2177 dig_hotplug_reg &= ~mask; 2178 } 2179 2180 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2181 if (!hotplug_trigger) 2182 return; 2183 2184 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2185 dig_hotplug_reg, hpd, 2186 pch_port_hotplug_long_detect); 2187 2188 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2189 } 2190 2191 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2192 { 2193 int pipe; 2194 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 2195 2196 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); 2197 2198 if (pch_iir & SDE_AUDIO_POWER_MASK) { 2199 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 2200 SDE_AUDIO_POWER_SHIFT); 2201 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 2202 port_name(port)); 2203 } 2204 2205 if (pch_iir & SDE_AUX_MASK) 2206 dp_aux_irq_handler(dev_priv); 2207 2208 if (pch_iir & SDE_GMBUS) 2209 gmbus_irq_handler(dev_priv); 2210 2211 if (pch_iir & SDE_AUDIO_HDCP_MASK) 2212 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 2213 2214 if (pch_iir & SDE_AUDIO_TRANS_MASK) 2215 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 2216 2217 if (pch_iir & SDE_POISON) 2218 DRM_ERROR("PCH poison interrupt\n"); 2219 2220 if (pch_iir & SDE_FDI_MASK) 2221 for_each_pipe(dev_priv, pipe) 2222 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2223 pipe_name(pipe), 2224 I915_READ(FDI_RX_IIR(pipe))); 2225 2226 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 2227 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 2228 2229 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 2230 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 2231 2232 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 2233 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A); 2234 2235 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 2236 intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_B); 2237 } 2238 2239 static void ivb_err_int_handler(struct drm_i915_private *dev_priv) 2240 { 2241 u32 err_int = I915_READ(GEN7_ERR_INT); 2242 enum i915_pipe pipe; 2243 2244 if (err_int & ERR_INT_POISON) 2245 DRM_ERROR("Poison interrupt\n"); 2246 2247 for_each_pipe(dev_priv, pipe) { 2248 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) 2249 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2250 2251 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2252 if (IS_IVYBRIDGE(dev_priv)) 2253 ivb_pipe_crc_irq_handler(dev_priv, pipe); 2254 else 2255 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2256 } 2257 } 2258 2259 I915_WRITE(GEN7_ERR_INT, err_int); 2260 } 2261 2262 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) 2263 { 2264 u32 serr_int = I915_READ(SERR_INT); 2265 enum i915_pipe pipe; 2266 2267 if (serr_int & SERR_INT_POISON) 2268 DRM_ERROR("PCH poison interrupt\n"); 2269 2270 for_each_pipe(dev_priv, pipe) 2271 if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) 2272 intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); 2273 2274 I915_WRITE(SERR_INT, serr_int); 2275 } 2276 2277 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2278 { 2279 int pipe; 2280 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2281 2282 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); 2283 2284 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2285 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2286 
SDE_AUDIO_POWER_SHIFT_CPT); 2287 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 2288 port_name(port)); 2289 } 2290 2291 if (pch_iir & SDE_AUX_MASK_CPT) 2292 dp_aux_irq_handler(dev_priv); 2293 2294 if (pch_iir & SDE_GMBUS_CPT) 2295 gmbus_irq_handler(dev_priv); 2296 2297 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2298 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2299 2300 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2301 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2302 2303 if (pch_iir & SDE_FDI_MASK_CPT) 2304 for_each_pipe(dev_priv, pipe) 2305 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2306 pipe_name(pipe), 2307 I915_READ(FDI_RX_IIR(pipe))); 2308 2309 if (pch_iir & SDE_ERROR_CPT) 2310 cpt_serr_int_handler(dev_priv); 2311 } 2312 2313 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) 2314 { 2315 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2316 ~SDE_PORTE_HOTPLUG_SPT; 2317 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2318 u32 pin_mask = 0, long_mask = 0; 2319 2320 if (hotplug_trigger) { 2321 u32 dig_hotplug_reg; 2322 2323 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2324 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2325 2326 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2327 dig_hotplug_reg, hpd_spt, 2328 spt_port_hotplug_long_detect); 2329 } 2330 2331 if (hotplug2_trigger) { 2332 u32 dig_hotplug_reg; 2333 2334 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); 2335 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); 2336 2337 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger, 2338 dig_hotplug_reg, hpd_spt, 2339 spt_port_hotplug2_long_detect); 2340 } 2341 2342 if (pin_mask) 2343 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2344 2345 if (pch_iir & SDE_GMBUS_CPT) 2346 gmbus_irq_handler(dev_priv); 2347 } 2348 2349 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, 2350 u32 hotplug_trigger, 2351 const u32 hpd[HPD_NUM_PINS]) 2352 { 2353 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2354 2355 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2356 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); 2357 2358 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2359 dig_hotplug_reg, hpd, 2360 ilk_port_hotplug_long_detect); 2361 2362 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2363 } 2364 2365 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, 2366 u32 de_iir) 2367 { 2368 enum i915_pipe pipe; 2369 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2370 2371 if (hotplug_trigger) 2372 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk); 2373 2374 if (de_iir & DE_AUX_CHANNEL_A) 2375 dp_aux_irq_handler(dev_priv); 2376 2377 if (de_iir & DE_GSE) 2378 intel_opregion_asle_intr(dev_priv); 2379 2380 if (de_iir & DE_POISON) 2381 DRM_ERROR("Poison interrupt\n"); 2382 2383 for_each_pipe(dev_priv, pipe) { 2384 if (de_iir & DE_PIPE_VBLANK(pipe)) 2385 drm_handle_vblank(&dev_priv->drm, pipe); 2386 2387 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2388 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2389 2390 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2391 i9xx_pipe_crc_irq_handler(dev_priv, pipe); 2392 } 2393 2394 /* check event from PCH */ 2395 if (de_iir & DE_PCH_EVENT) { 2396 u32 pch_iir = I915_READ(SDEIIR); 2397 2398 if (HAS_PCH_CPT(dev_priv)) 2399 cpt_irq_handler(dev_priv, pch_iir); 2400 else 2401 ibx_irq_handler(dev_priv, pch_iir); 2402 2403 /* should clear PCH hotplug event before clear CPU irq */ 2404 I915_WRITE(SDEIIR, pch_iir); 2405 } 2406 2407 if 
(IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT) 2408 ironlake_rps_change_irq_handler(dev_priv); 2409 } 2410 2411 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, 2412 u32 de_iir) 2413 { 2414 enum i915_pipe pipe; 2415 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2416 2417 if (hotplug_trigger) 2418 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb); 2419 2420 if (de_iir & DE_ERR_INT_IVB) 2421 ivb_err_int_handler(dev_priv); 2422 2423 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2424 dp_aux_irq_handler(dev_priv); 2425 2426 if (de_iir & DE_GSE_IVB) 2427 intel_opregion_asle_intr(dev_priv); 2428 2429 for_each_pipe(dev_priv, pipe) { 2430 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2431 drm_handle_vblank(&dev_priv->drm, pipe); 2432 } 2433 2434 /* check event from PCH */ 2435 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { 2436 u32 pch_iir = I915_READ(SDEIIR); 2437 2438 cpt_irq_handler(dev_priv, pch_iir); 2439 2440 /* clear PCH hotplug event before clear CPU irq */ 2441 I915_WRITE(SDEIIR, pch_iir); 2442 } 2443 } 2444 2445 /* 2446 * To handle irqs with the minimum potential races with fresh interrupts, we: 2447 * 1 - Disable Master Interrupt Control. 2448 * 2 - Find the source(s) of the interrupt. 2449 * 3 - Clear the Interrupt Identity bits (IIR). 2450 * 4 - Process the interrupt(s) that had bits set in the IIRs. 2451 * 5 - Re-enable Master Interrupt Control. 2452 */ 2453 static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2454 { 2455 struct drm_device *dev = arg; 2456 struct drm_i915_private *dev_priv = to_i915(dev); 2457 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2458 irqreturn_t ret = IRQ_NONE; 2459 2460 if (!intel_irqs_enabled(dev_priv)) 2461 return IRQ_NONE; 2462 2463 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2464 disable_rpm_wakeref_asserts(dev_priv); 2465 2466 /* disable master interrupt before clearing iir */ 2467 de_ier = I915_READ(DEIER); 2468 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2469 POSTING_READ(DEIER); 2470 2471 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2472 interrupts will be stored on its back queue, and then we'll be 2473 able to process them after we restore SDEIER (as soon as we restore 2474 it, we'll get an interrupt if SDEIIR still has something to process 2475 due to its back queue).
*/ 2476 if (!HAS_PCH_NOP(dev_priv)) { 2477 sde_ier = I915_READ(SDEIER); 2478 I915_WRITE(SDEIER, 0); 2479 POSTING_READ(SDEIER); 2480 } 2481 2482 /* Find, clear, then process each source of interrupt */ 2483 2484 gt_iir = I915_READ(GTIIR); 2485 if (gt_iir) { 2486 I915_WRITE(GTIIR, gt_iir); 2487 ret = IRQ_HANDLED; 2488 if (INTEL_GEN(dev_priv) >= 6) 2489 snb_gt_irq_handler(dev_priv, gt_iir); 2490 else 2491 ilk_gt_irq_handler(dev_priv, gt_iir); 2492 } 2493 2494 de_iir = I915_READ(DEIIR); 2495 if (de_iir) { 2496 I915_WRITE(DEIIR, de_iir); 2497 ret = IRQ_HANDLED; 2498 if (INTEL_GEN(dev_priv) >= 7) 2499 ivb_display_irq_handler(dev_priv, de_iir); 2500 else 2501 ilk_display_irq_handler(dev_priv, de_iir); 2502 } 2503 2504 if (INTEL_GEN(dev_priv) >= 6) { 2505 u32 pm_iir = I915_READ(GEN6_PMIIR); 2506 if (pm_iir) { 2507 I915_WRITE(GEN6_PMIIR, pm_iir); 2508 ret = IRQ_HANDLED; 2509 gen6_rps_irq_handler(dev_priv, pm_iir); 2510 } 2511 } 2512 2513 I915_WRITE(DEIER, de_ier); 2514 POSTING_READ(DEIER); 2515 if (!HAS_PCH_NOP(dev_priv)) { 2516 I915_WRITE(SDEIER, sde_ier); 2517 POSTING_READ(SDEIER); 2518 } 2519 2520 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2521 enable_rpm_wakeref_asserts(dev_priv); 2522 2523 return ret; 2524 } 2525 2526 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, 2527 u32 hotplug_trigger, 2528 const u32 hpd[HPD_NUM_PINS]) 2529 { 2530 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2531 2532 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2533 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); 2534 2535 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 2536 dig_hotplug_reg, hpd, 2537 bxt_port_hotplug_long_detect); 2538 2539 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); 2540 } 2541 2542 static irqreturn_t 2543 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2544 { 2545 irqreturn_t ret = IRQ_NONE; 2546 u32 iir; 2547 enum i915_pipe pipe; 2548 2549 if (master_ctl & GEN8_DE_MISC_IRQ) { 2550 iir = I915_READ(GEN8_DE_MISC_IIR); 2551 if (iir) { 2552 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2553 ret = IRQ_HANDLED; 2554 if (iir & GEN8_DE_MISC_GSE) 2555 intel_opregion_asle_intr(dev_priv); 2556 else 2557 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2558 } 2559 else 2560 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2561 } 2562 2563 if (master_ctl & GEN8_DE_PORT_IRQ) { 2564 iir = I915_READ(GEN8_DE_PORT_IIR); 2565 if (iir) { 2566 u32 tmp_mask; 2567 bool found = false; 2568 2569 I915_WRITE(GEN8_DE_PORT_IIR, iir); 2570 ret = IRQ_HANDLED; 2571 2572 tmp_mask = GEN8_AUX_CHANNEL_A; 2573 if (INTEL_GEN(dev_priv) >= 9) 2574 tmp_mask |= GEN9_AUX_CHANNEL_B | 2575 GEN9_AUX_CHANNEL_C | 2576 GEN9_AUX_CHANNEL_D; 2577 2578 if (iir & tmp_mask) { 2579 dp_aux_irq_handler(dev_priv); 2580 found = true; 2581 } 2582 2583 if (IS_GEN9_LP(dev_priv)) { 2584 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2585 if (tmp_mask) { 2586 bxt_hpd_irq_handler(dev_priv, tmp_mask, 2587 hpd_bxt); 2588 found = true; 2589 } 2590 } else if (IS_BROADWELL(dev_priv)) { 2591 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2592 if (tmp_mask) { 2593 ilk_hpd_irq_handler(dev_priv, 2594 tmp_mask, hpd_bdw); 2595 found = true; 2596 } 2597 } 2598 2599 if (IS_GEN9_LP(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) { 2600 gmbus_irq_handler(dev_priv); 2601 found = true; 2602 } 2603 2604 if (!found) 2605 DRM_ERROR("Unexpected DE Port interrupt\n"); 2606 } 2607 else 2608 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2609 } 2610 2611 for_each_pipe(dev_priv, pipe) { 2612 u32 
fault_errors; 2613 2614 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2615 continue; 2616 2617 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2618 if (!iir) { 2619 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2620 continue; 2621 } 2622 2623 ret = IRQ_HANDLED; 2624 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2625 2626 if (iir & GEN8_PIPE_VBLANK) 2627 drm_handle_vblank(&dev_priv->drm, pipe); 2628 2629 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2630 hsw_pipe_crc_irq_handler(dev_priv, pipe); 2631 2632 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2633 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2634 2635 fault_errors = iir; 2636 if (INTEL_GEN(dev_priv) >= 9) 2637 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 2638 else 2639 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2640 2641 if (fault_errors) 2642 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2643 pipe_name(pipe), 2644 fault_errors); 2645 } 2646 2647 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) && 2648 master_ctl & GEN8_DE_PCH_IRQ) { 2649 /* 2650 * FIXME(BDW): Assume for now that the new interrupt handling 2651 * scheme also closed the SDE interrupt handling race we've seen 2652 * on older pch-split platforms. But this needs testing. 2653 */ 2654 iir = I915_READ(SDEIIR); 2655 if (iir) { 2656 I915_WRITE(SDEIIR, iir); 2657 ret = IRQ_HANDLED; 2658 2659 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) || 2660 HAS_PCH_CNP(dev_priv)) 2661 spt_irq_handler(dev_priv, iir); 2662 else 2663 cpt_irq_handler(dev_priv, iir); 2664 } else { 2665 /* 2666 * Like on previous PCH there seems to be something 2667 * fishy going on with forwarding PCH interrupts. 2668 */ 2669 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n"); 2670 } 2671 } 2672 2673 return ret; 2674 } 2675 2676 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2677 { 2678 struct drm_device *dev = arg; 2679 struct drm_i915_private *dev_priv = to_i915(dev); 2680 u32 master_ctl; 2681 u32 gt_iir[4] = {}; 2682 irqreturn_t ret; 2683 2684 if (!intel_irqs_enabled(dev_priv)) 2685 return IRQ_NONE; 2686 2687 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ); 2688 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2689 if (!master_ctl) 2690 return IRQ_NONE; 2691 2692 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2693 2694 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2695 disable_rpm_wakeref_asserts(dev_priv); 2696 2697 /* Find, clear, then process each source of interrupt */ 2698 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2699 gen8_gt_irq_handler(dev_priv, gt_iir); 2700 ret |= gen8_de_irq_handler(dev_priv, master_ctl); 2701 2702 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2703 POSTING_READ_FW(GEN8_MASTER_IRQ); 2704 2705 enable_rpm_wakeref_asserts(dev_priv); 2706 2707 return ret; 2708 } 2709 2710 struct wedge_me { 2711 struct delayed_work work; 2712 struct drm_i915_private *i915; 2713 const char *name; 2714 }; 2715 2716 static void wedge_me(struct work_struct *work) 2717 { 2718 struct wedge_me *w = container_of(work, typeof(*w), work.work); 2719 2720 dev_err(w->i915->drm.dev, 2721 "%s timed out, cancelling all in-flight rendering.\n", 2722 w->name); 2723 i915_gem_set_wedged(w->i915); 2724 } 2725 2726 static void __init_wedge(struct wedge_me *w, 2727 struct drm_i915_private *i915, 2728 long timeout, 2729 const char *name) 2730 { 2731 w->i915 = i915; 2732 w->name = name; 2733 2734 INIT_DELAYED_WORK_ONSTACK(&w->work, wedge_me); 2735 schedule_delayed_work(&w->work, timeout); 2736 } 2737 2738 static void __fini_wedge(struct wedge_me *w) 2739 { 2740 
cancel_delayed_work_sync(&w->work); 2741 destroy_delayed_work_on_stack(&w->work); 2742 w->i915 = NULL; 2743 } 2744 2745 #define i915_wedge_on_timeout(W, DEV, TIMEOUT) \ 2746 for (__init_wedge((W), (DEV), (TIMEOUT), __func__); \ 2747 (W)->i915; \ 2748 __fini_wedge((W))) 2749 2750 /** 2751 * i915_reset_device - do process context error handling work 2752 * @dev_priv: i915 device private 2753 * 2754 * Fire an error uevent so userspace can see that a hang or error 2755 * was detected. 2756 */ 2757 static void i915_reset_device(struct drm_i915_private *dev_priv) 2758 { 2759 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj; 2760 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2761 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2762 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2763 struct wedge_me w; 2764 2765 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event); 2766 2767 DRM_DEBUG_DRIVER("resetting chip\n"); 2768 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event); 2769 2770 /* Use a watchdog to ensure that our reset completes */ 2771 i915_wedge_on_timeout(&w, dev_priv, 5*HZ) { 2772 intel_prepare_reset(dev_priv); 2773 2774 /* Signal that locked waiters should reset the GPU */ 2775 set_bit(I915_RESET_HANDOFF, &dev_priv->gpu_error.flags); 2776 wake_up_all(&dev_priv->gpu_error.wait_queue); 2777 2778 /* Wait for anyone holding the lock to wakeup, without 2779 * blocking indefinitely on struct_mutex. 2780 */ 2781 do { 2782 if (mutex_trylock(&dev_priv->drm.struct_mutex)) { 2783 i915_reset(dev_priv, 0); 2784 mutex_unlock(&dev_priv->drm.struct_mutex); 2785 } 2786 } while (wait_on_bit_timeout(&dev_priv->gpu_error.flags, 2787 I915_RESET_HANDOFF, 2788 TASK_UNINTERRUPTIBLE, 2789 1)); 2790 2791 intel_finish_reset(dev_priv); 2792 } 2793 2794 if (!test_bit(I915_WEDGED, &dev_priv->gpu_error.flags)) 2795 kobject_uevent_env(kobj, 2796 KOBJ_CHANGE, reset_done_event); 2797 } 2798 2799 static void i915_clear_error_registers(struct drm_i915_private *dev_priv) 2800 { 2801 u32 eir; 2802 2803 if (!IS_GEN2(dev_priv)) 2804 I915_WRITE(PGTBL_ER, I915_READ(PGTBL_ER)); 2805 2806 if (INTEL_GEN(dev_priv) < 4) 2807 I915_WRITE(IPEIR, I915_READ(IPEIR)); 2808 else 2809 I915_WRITE(IPEIR_I965, I915_READ(IPEIR_I965)); 2810 2811 I915_WRITE(EIR, I915_READ(EIR)); 2812 eir = I915_READ(EIR); 2813 if (eir) { 2814 /* 2815 * some errors might have become stuck, 2816 * mask them. 2817 */ 2818 DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masking\n", eir); 2819 I915_WRITE(EMR, I915_READ(EMR) | eir); 2820 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2821 } 2822 } 2823 2824 /** 2825 * i915_handle_error - handle a gpu error 2826 * @dev_priv: i915 device private 2827 * @engine_mask: mask representing engines that are hung 2828 * @fmt: Error message format string 2829 * 2830 * Do some basic checking of register state at error time and 2831 * dump it to the syslog. Also call i915_capture_error_state() to make 2832 * sure we get a record and make it available in debugfs. Fire a uevent 2833 * so userspace knows something bad happened (should trigger collection 2834 * of a ring dump etc.). 2835 */ 2836 void i915_handle_error(struct drm_i915_private *dev_priv, 2837 u32 engine_mask, 2838 const char *fmt, ...) 
2839 { 2840 struct intel_engine_cs *engine; 2841 unsigned int tmp; 2842 va_list args; 2843 char error_msg[80]; 2844 2845 va_start(args, fmt); 2846 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2847 va_end(args); 2848 2849 /* 2850 * In most cases it's guaranteed that we get here with an RPM 2851 * reference held, for example because there is a pending GPU 2852 * request that won't finish until the reset is done. This 2853 * isn't the case at least when we get here by doing a 2854 * simulated reset via debugfs, so get an RPM reference. 2855 */ 2856 intel_runtime_pm_get(dev_priv); 2857 2858 i915_capture_error_state(dev_priv, engine_mask, error_msg); 2859 i915_clear_error_registers(dev_priv); 2860 2861 /* 2862 * Try engine reset when available. We fall back to full reset if 2863 * single reset fails. 2864 */ 2865 if (intel_has_reset_engine(dev_priv)) { 2866 for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { 2867 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); 2868 if (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2869 &dev_priv->gpu_error.flags)) 2870 continue; 2871 2872 if (i915_reset_engine(engine, 0) == 0) 2873 engine_mask &= ~intel_engine_flag(engine); 2874 2875 clear_bit(I915_RESET_ENGINE + engine->id, 2876 &dev_priv->gpu_error.flags); 2877 wake_up_bit(&dev_priv->gpu_error.flags, 2878 I915_RESET_ENGINE + engine->id); 2879 } 2880 } 2881 2882 if (!engine_mask) 2883 goto out; 2884 2885 /* Full reset needs the mutex, stop any other user trying to do so. */ 2886 if (test_and_set_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags)) { 2887 wait_event(dev_priv->gpu_error.reset_queue, 2888 !test_bit(I915_RESET_BACKOFF, 2889 &dev_priv->gpu_error.flags)); 2890 goto out; 2891 } 2892 2893 /* Prevent any other reset-engine attempt. */ 2894 for_each_engine(engine, dev_priv, tmp) { 2895 while (test_and_set_bit(I915_RESET_ENGINE + engine->id, 2896 &dev_priv->gpu_error.flags)) 2897 wait_on_bit(&dev_priv->gpu_error.flags, 2898 I915_RESET_ENGINE + engine->id, 2899 TASK_UNINTERRUPTIBLE); 2900 } 2901 2902 i915_reset_device(dev_priv); 2903 2904 for_each_engine(engine, dev_priv, tmp) { 2905 clear_bit(I915_RESET_ENGINE + engine->id, 2906 &dev_priv->gpu_error.flags); 2907 } 2908 2909 clear_bit(I915_RESET_BACKOFF, &dev_priv->gpu_error.flags); 2910 wake_up_all(&dev_priv->gpu_error.reset_queue); 2911 2912 out: 2913 intel_runtime_pm_put(dev_priv); 2914 } 2915 2916 /* Called from drm generic code, passed 'crtc' which 2917 * we use as a pipe index 2918 */ 2919 static int i8xx_enable_vblank(struct drm_device *dev, unsigned int pipe) 2920 { 2921 struct drm_i915_private *dev_priv = to_i915(dev); 2922 unsigned long irqflags; 2923 2924 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2925 i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2926 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2927 2928 return 0; 2929 } 2930 2931 static int i965_enable_vblank(struct drm_device *dev, unsigned int pipe) 2932 { 2933 struct drm_i915_private *dev_priv = to_i915(dev); 2934 unsigned long irqflags; 2935 2936 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2937 i915_enable_pipestat(dev_priv, pipe, 2938 PIPE_START_VBLANK_INTERRUPT_STATUS); 2939 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2940 2941 return 0; 2942 } 2943 2944 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2945 { 2946 struct drm_i915_private *dev_priv = to_i915(dev); 2947 unsigned long irqflags; 2948 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 
2949 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 2950 2951 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2952 ilk_enable_display_irq(dev_priv, bit); 2953 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2954 2955 return 0; 2956 } 2957 2958 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2959 { 2960 struct drm_i915_private *dev_priv = to_i915(dev); 2961 unsigned long irqflags; 2962 2963 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2964 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2966 2967 return 0; 2968 } 2969 2970 /* Called from drm generic code, passed 'crtc' which 2971 * we use as a pipe index 2972 */ 2973 static void i8xx_disable_vblank(struct drm_device *dev, unsigned int pipe) 2974 { 2975 struct drm_i915_private *dev_priv = to_i915(dev); 2976 unsigned long irqflags; 2977 2978 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2979 i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); 2980 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2981 } 2982 2983 static void i965_disable_vblank(struct drm_device *dev, unsigned int pipe) 2984 { 2985 struct drm_i915_private *dev_priv = to_i915(dev); 2986 unsigned long irqflags; 2987 2988 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2989 i915_disable_pipestat(dev_priv, pipe, 2990 PIPE_START_VBLANK_INTERRUPT_STATUS); 2991 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2992 } 2993 2994 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2995 { 2996 struct drm_i915_private *dev_priv = to_i915(dev); 2997 unsigned long irqflags; 2998 uint32_t bit = INTEL_GEN(dev_priv) >= 7 ? 2999 DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); 3000 3001 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3002 ilk_disable_display_irq(dev_priv, bit); 3003 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3004 } 3005 3006 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 3007 { 3008 struct drm_i915_private *dev_priv = to_i915(dev); 3009 unsigned long irqflags; 3010 3011 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3012 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 3013 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3014 } 3015 3016 static void ibx_irq_reset(struct drm_i915_private *dev_priv) 3017 { 3018 if (HAS_PCH_NOP(dev_priv)) 3019 return; 3020 3021 GEN3_IRQ_RESET(SDE); 3022 3023 if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3024 I915_WRITE(SERR_INT, 0xffffffff); 3025 } 3026 3027 /* 3028 * SDEIER is also touched by the interrupt handler to work around missed PCH 3029 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3030 * instead we unconditionally enable all PCH interrupt sources here, but then 3031 * only unmask them as needed with SDEIMR. 3032 * 3033 * This function needs to be called before interrupts are enabled. 
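* (see the SDEIER save/zero/restore sequence in ironlake_irq_handler() above)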
3034 */ 3035 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3036 { 3037 struct drm_i915_private *dev_priv = to_i915(dev); 3038 3039 if (HAS_PCH_NOP(dev_priv)) 3040 return; 3041 3042 WARN_ON(I915_READ(SDEIER) != 0); 3043 I915_WRITE(SDEIER, 0xffffffff); 3044 POSTING_READ(SDEIER); 3045 } 3046 3047 static void gen5_gt_irq_reset(struct drm_i915_private *dev_priv) 3048 { 3049 GEN3_IRQ_RESET(GT); 3050 if (INTEL_GEN(dev_priv) >= 6) 3051 GEN3_IRQ_RESET(GEN6_PM); 3052 } 3053 3054 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) 3055 { 3056 if (IS_CHERRYVIEW(dev_priv)) 3057 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3058 else 3059 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3060 3061 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); 3062 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3063 3064 i9xx_pipestat_irq_reset(dev_priv); 3065 3066 GEN3_IRQ_RESET(VLV_); 3067 dev_priv->irq_mask = ~0; 3068 } 3069 3070 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) 3071 { 3072 u32 pipestat_mask; 3073 u32 enable_mask; 3074 enum i915_pipe pipe; 3075 3076 pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; 3077 3078 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3079 for_each_pipe(dev_priv, pipe) 3080 i915_enable_pipestat(dev_priv, pipe, pipestat_mask); 3081 3082 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3083 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3084 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3085 I915_LPE_PIPE_A_INTERRUPT | 3086 I915_LPE_PIPE_B_INTERRUPT; 3087 3088 if (IS_CHERRYVIEW(dev_priv)) 3089 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | 3090 I915_LPE_PIPE_C_INTERRUPT; 3091 3092 WARN_ON(dev_priv->irq_mask != ~0); 3093 3094 dev_priv->irq_mask = ~enable_mask; 3095 3096 GEN3_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); 3097 } 3098 3099 /* drm_dma.h hooks 3100 */ 3101 static void ironlake_irq_reset(struct drm_device *dev) 3102 { 3103 struct drm_i915_private *dev_priv = to_i915(dev); 3104 3105 if (IS_GEN5(dev_priv)) 3106 I915_WRITE(HWSTAM, 0xffffffff); 3107 3108 GEN3_IRQ_RESET(DE); 3109 if (IS_GEN7(dev_priv)) 3110 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3111 3112 gen5_gt_irq_reset(dev_priv); 3113 3114 ibx_irq_reset(dev_priv); 3115 } 3116 3117 static void valleyview_irq_reset(struct drm_device *dev) 3118 { 3119 struct drm_i915_private *dev_priv = to_i915(dev); 3120 3121 I915_WRITE(VLV_MASTER_IER, 0); 3122 POSTING_READ(VLV_MASTER_IER); 3123 3124 gen5_gt_irq_reset(dev_priv); 3125 3126 spin_lock_irq(&dev_priv->irq_lock); 3127 if (dev_priv->display_irqs_enabled) 3128 vlv_display_irq_reset(dev_priv); 3129 spin_unlock_irq(&dev_priv->irq_lock); 3130 } 3131 3132 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3133 { 3134 GEN8_IRQ_RESET_NDX(GT, 0); 3135 GEN8_IRQ_RESET_NDX(GT, 1); 3136 GEN8_IRQ_RESET_NDX(GT, 2); 3137 GEN8_IRQ_RESET_NDX(GT, 3); 3138 } 3139 3140 static void gen8_irq_reset(struct drm_device *dev) 3141 { 3142 struct drm_i915_private *dev_priv = to_i915(dev); 3143 int pipe; 3144 3145 I915_WRITE(GEN8_MASTER_IRQ, 0); 3146 POSTING_READ(GEN8_MASTER_IRQ); 3147 3148 gen8_gt_irq_reset(dev_priv); 3149 3150 for_each_pipe(dev_priv, pipe) 3151 if (intel_display_power_is_enabled(dev_priv, 3152 POWER_DOMAIN_PIPE(pipe))) 3153 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3154 3155 GEN3_IRQ_RESET(GEN8_DE_PORT_); 3156 GEN3_IRQ_RESET(GEN8_DE_MISC_); 3157 GEN3_IRQ_RESET(GEN8_PCU_); 3158 3159 if (HAS_PCH_SPLIT(dev_priv)) 3160 ibx_irq_reset(dev_priv); 3161 } 3162 3163 void gen8_irq_power_well_post_enable(struct 
drm_i915_private *dev_priv, 3164 u8 pipe_mask) 3165 { 3166 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3167 enum i915_pipe pipe; 3168 3169 spin_lock_irq(&dev_priv->irq_lock); 3170 3171 if (!intel_irqs_enabled(dev_priv)) { 3172 spin_unlock_irq(&dev_priv->irq_lock); 3173 return; 3174 } 3175 3176 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3177 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3178 dev_priv->de_irq_mask[pipe], 3179 ~dev_priv->de_irq_mask[pipe] | extra_ier); 3180 3181 spin_unlock_irq(&dev_priv->irq_lock); 3182 } 3183 3184 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, 3185 u8 pipe_mask) 3186 { 3187 enum i915_pipe pipe; 3188 3189 spin_lock_irq(&dev_priv->irq_lock); 3190 3191 if (!intel_irqs_enabled(dev_priv)) { 3192 spin_unlock_irq(&dev_priv->irq_lock); 3193 return; 3194 } 3195 3196 for_each_pipe_masked(dev_priv, pipe, pipe_mask) 3197 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3198 3199 spin_unlock_irq(&dev_priv->irq_lock); 3200 3201 /* make sure we're done processing display irqs */ 3202 synchronize_irq(dev_priv->drm.irq); 3203 } 3204 3205 static void cherryview_irq_reset(struct drm_device *dev) 3206 { 3207 struct drm_i915_private *dev_priv = to_i915(dev); 3208 3209 I915_WRITE(GEN8_MASTER_IRQ, 0); 3210 POSTING_READ(GEN8_MASTER_IRQ); 3211 3212 gen8_gt_irq_reset(dev_priv); 3213 3214 GEN3_IRQ_RESET(GEN8_PCU_); 3215 3216 spin_lock_irq(&dev_priv->irq_lock); 3217 if (dev_priv->display_irqs_enabled) 3218 vlv_display_irq_reset(dev_priv); 3219 spin_unlock_irq(&dev_priv->irq_lock); 3220 } 3221 3222 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, 3223 const u32 hpd[HPD_NUM_PINS]) 3224 { 3225 struct intel_encoder *encoder; 3226 u32 enabled_irqs = 0; 3227 3228 for_each_intel_encoder(&dev_priv->drm, encoder) 3229 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3230 enabled_irqs |= hpd[encoder->hpd_pin]; 3231 3232 return enabled_irqs; 3233 } 3234 3235 static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) 3236 { 3237 u32 hotplug; 3238 3239 /* 3240 * Enable digital hotplug on the PCH, and configure the DP short pulse 3241 * duration to 2ms (which is the minimum in the Display Port spec). 3242 * The pulse duration bits are reserved on LPT+. 3243 */ 3244 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3245 hotplug &= ~(PORTB_PULSE_DURATION_MASK | 3246 PORTC_PULSE_DURATION_MASK | 3247 PORTD_PULSE_DURATION_MASK); 3248 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3249 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3250 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3251 /* 3252 * When CPU and PCH are on the same package, port A 3253 * HPD must be enabled in both north and south. 
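* LPT-LP is the on-package PCH variant, hence the extra port A enable below.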
3254 */ 3255 if (HAS_PCH_LPT_LP(dev_priv)) 3256 hotplug |= PORTA_HOTPLUG_ENABLE; 3257 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3258 } 3259 3260 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) 3261 { 3262 u32 hotplug_irqs, enabled_irqs; 3263 3264 if (HAS_PCH_IBX(dev_priv)) { 3265 hotplug_irqs = SDE_HOTPLUG_MASK; 3266 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx); 3267 } else { 3268 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3269 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt); 3270 } 3271 3272 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3273 3274 ibx_hpd_detection_setup(dev_priv); 3275 } 3276 3277 static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3278 { 3279 u32 val, hotplug; 3280 3281 /* Display WA #1179 WaHardHangonHotPlug: cnp */ 3282 if (HAS_PCH_CNP(dev_priv)) { 3283 val = I915_READ(SOUTH_CHICKEN1); 3284 val &= ~CHASSIS_CLK_REQ_DURATION_MASK; 3285 val |= CHASSIS_CLK_REQ_DURATION(0xf); 3286 I915_WRITE(SOUTH_CHICKEN1, val); 3287 } 3288 3289 /* Enable digital hotplug on the PCH */ 3290 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3291 hotplug |= PORTA_HOTPLUG_ENABLE | 3292 PORTB_HOTPLUG_ENABLE | 3293 PORTC_HOTPLUG_ENABLE | 3294 PORTD_HOTPLUG_ENABLE; 3295 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3296 3297 hotplug = I915_READ(PCH_PORT_HOTPLUG2); 3298 hotplug |= PORTE_HOTPLUG_ENABLE; 3299 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3300 } 3301 3302 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3303 { 3304 u32 hotplug_irqs, enabled_irqs; 3305 3306 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3307 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt); 3308 3309 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3310 3311 spt_hpd_detection_setup(dev_priv); 3312 } 3313 3314 static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) 3315 { 3316 u32 hotplug; 3317 3318 /* 3319 * Enable digital hotplug on the CPU, and configure the DP short pulse 3320 * duration to 2ms (which is the minimum in the Display Port spec) 3321 * The pulse duration bits are reserved on HSW+. 
3322 */ 3323 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 3324 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK; 3325 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | 3326 DIGITAL_PORTA_PULSE_DURATION_2ms; 3327 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3328 } 3329 3330 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) 3331 { 3332 u32 hotplug_irqs, enabled_irqs; 3333 3334 if (INTEL_GEN(dev_priv) >= 8) { 3335 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3336 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw); 3337 3338 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3339 } else if (INTEL_GEN(dev_priv) >= 7) { 3340 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3341 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb); 3342 3343 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3344 } else { 3345 hotplug_irqs = DE_DP_A_HOTPLUG; 3346 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk); 3347 3348 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3349 } 3350 3351 ilk_hpd_detection_setup(dev_priv); 3352 3353 ibx_hpd_irq_setup(dev_priv); 3354 } 3355 3356 static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv, 3357 u32 enabled_irqs) 3358 { 3359 u32 hotplug; 3360 3361 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3362 hotplug |= PORTA_HOTPLUG_ENABLE | 3363 PORTB_HOTPLUG_ENABLE | 3364 PORTC_HOTPLUG_ENABLE; 3365 3366 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n", 3367 hotplug, enabled_irqs); 3368 hotplug &= ~BXT_DDI_HPD_INVERT_MASK; 3369 3370 /* 3371 * For BXT invert bit has to be set based on AOB design 3372 * for HPD detection logic, update it based on VBT fields. 3373 */ 3374 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) && 3375 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A)) 3376 hotplug |= BXT_DDIA_HPD_INVERT; 3377 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) && 3378 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B)) 3379 hotplug |= BXT_DDIB_HPD_INVERT; 3380 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) && 3381 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C)) 3382 hotplug |= BXT_DDIC_HPD_INVERT; 3383 3384 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3385 } 3386 3387 static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) 3388 { 3389 __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK); 3390 } 3391 3392 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) 3393 { 3394 u32 hotplug_irqs, enabled_irqs; 3395 3396 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt); 3397 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3398 3399 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3400 3401 __bxt_hpd_detection_setup(dev_priv, enabled_irqs); 3402 } 3403 3404 static void ibx_irq_postinstall(struct drm_device *dev) 3405 { 3406 struct drm_i915_private *dev_priv = to_i915(dev); 3407 u32 mask; 3408 3409 if (HAS_PCH_NOP(dev_priv)) 3410 return; 3411 3412 if (HAS_PCH_IBX(dev_priv)) 3413 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3414 else if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) 3415 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3416 else 3417 mask = SDE_GMBUS_CPT; 3418 3419 gen3_assert_iir_is_zero(dev_priv, SDEIIR); 3420 I915_WRITE(SDEIMR, ~mask); 3421 3422 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) || 3423 HAS_PCH_LPT(dev_priv)) 3424 ibx_hpd_detection_setup(dev_priv); 3425 else 3426 spt_hpd_detection_setup(dev_priv); 3427 } 3428 3429 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3430 { 3431 struct drm_i915_private *dev_priv = to_i915(dev); 3432 u32 pm_irqs, gt_irqs; 3433 3434 pm_irqs = 
gt_irqs = 0; 3435 3436 dev_priv->gt_irq_mask = ~0; 3437 if (HAS_L3_DPF(dev_priv)) { 3438 /* L3 parity interrupt is always unmasked. */ 3439 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev_priv); 3440 gt_irqs |= GT_PARITY_ERROR(dev_priv); 3441 } 3442 3443 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3444 if (IS_GEN5(dev_priv)) { 3445 gt_irqs |= ILK_BSD_USER_INTERRUPT; 3446 } else { 3447 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3448 } 3449 3450 GEN3_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3451 3452 if (INTEL_GEN(dev_priv) >= 6) { 3453 /* 3454 * RPS interrupts will get enabled/disabled on demand when RPS 3455 * itself is enabled/disabled. 3456 */ 3457 if (HAS_VEBOX(dev_priv)) { 3458 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3459 dev_priv->pm_ier |= PM_VEBOX_USER_INTERRUPT; 3460 } 3461 3462 dev_priv->pm_imr = 0xffffffff; 3463 GEN3_IRQ_INIT(GEN6_PM, dev_priv->pm_imr, pm_irqs); 3464 } 3465 } 3466 3467 static int ironlake_irq_postinstall(struct drm_device *dev) 3468 { 3469 struct drm_i915_private *dev_priv = to_i915(dev); 3470 u32 display_mask, extra_mask; 3471 3472 if (INTEL_GEN(dev_priv) >= 7) { 3473 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3474 DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); 3475 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3476 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB | 3477 DE_DP_A_HOTPLUG_IVB); 3478 } else { 3479 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3480 DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE | 3481 DE_PIPEA_CRC_DONE | DE_POISON); 3482 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3483 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN | 3484 DE_DP_A_HOTPLUG); 3485 } 3486 3487 dev_priv->irq_mask = ~display_mask; 3488 3489 ibx_irq_pre_postinstall(dev); 3490 3491 GEN3_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3492 3493 gen5_gt_irq_postinstall(dev); 3494 3495 ilk_hpd_detection_setup(dev_priv); 3496 3497 ibx_irq_postinstall(dev); 3498 3499 if (IS_IRONLAKE_M(dev_priv)) { 3500 /* Enable PCU event interrupts 3501 * 3502 * spinlocking not required here for correctness since interrupt 3503 * setup is guaranteed to run in single-threaded context. But we 3504 * need it to make the assert_spin_locked happy. 
*/ 3505 spin_lock_irq(&dev_priv->irq_lock); 3506 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT); 3507 spin_unlock_irq(&dev_priv->irq_lock); 3508 } 3509 3510 return 0; 3511 } 3512 3513 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3514 { 3515 lockdep_assert_held(&dev_priv->irq_lock); 3516 3517 if (dev_priv->display_irqs_enabled) 3518 return; 3519 3520 dev_priv->display_irqs_enabled = true; 3521 3522 if (intel_irqs_enabled(dev_priv)) { 3523 vlv_display_irq_reset(dev_priv); 3524 vlv_display_irq_postinstall(dev_priv); 3525 } 3526 } 3527 3528 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3529 { 3530 lockdep_assert_held(&dev_priv->irq_lock); 3531 3532 if (!dev_priv->display_irqs_enabled) 3533 return; 3534 3535 dev_priv->display_irqs_enabled = false; 3536 3537 if (intel_irqs_enabled(dev_priv)) 3538 vlv_display_irq_reset(dev_priv); 3539 } 3540 3541 3542 static int valleyview_irq_postinstall(struct drm_device *dev) 3543 { 3544 struct drm_i915_private *dev_priv = to_i915(dev); 3545 3546 gen5_gt_irq_postinstall(dev); 3547 3548 spin_lock_irq(&dev_priv->irq_lock); 3549 if (dev_priv->display_irqs_enabled) 3550 vlv_display_irq_postinstall(dev_priv); 3551 spin_unlock_irq(&dev_priv->irq_lock); 3552 3553 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3554 POSTING_READ(VLV_MASTER_IER); 3555 3556 return 0; 3557 } 3558 3559 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3560 { 3561 /* These are interrupts we'll toggle with the ring mask register */ 3562 uint32_t gt_interrupts[] = { 3563 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3564 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3565 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | 3566 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3567 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3568 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3569 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | 3570 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3571 0, 3572 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | 3573 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3574 }; 3575 3576 if (HAS_L3_DPF(dev_priv)) 3577 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 3578 3579 dev_priv->pm_ier = 0x0; 3580 dev_priv->pm_imr = ~dev_priv->pm_ier; 3581 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); 3582 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); 3583 /* 3584 * RPS interrupts will get enabled/disabled on demand when RPS itself 3585 * is enabled/disabled. Same will be the case for GuC interrupts.
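* That is why GT bank 2 starts out fully masked below (pm_ier == 0, pm_imr == ~0).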
3586 */ 3587 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_imr, dev_priv->pm_ier); 3588 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); 3589 } 3590 3591 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3592 { 3593 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; 3594 uint32_t de_pipe_enables; 3595 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3596 u32 de_port_enables; 3597 u32 de_misc_masked = GEN8_DE_MISC_GSE; 3598 enum i915_pipe pipe; 3599 3600 if (INTEL_GEN(dev_priv) >= 9) { 3601 de_pipe_masked |= GEN9_DE_PIPE_IRQ_FAULT_ERRORS; 3602 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | 3603 GEN9_AUX_CHANNEL_D; 3604 if (IS_GEN9_LP(dev_priv)) 3605 de_port_masked |= BXT_DE_PORT_GMBUS; 3606 } else { 3607 de_pipe_masked |= GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3608 } 3609 3610 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3611 GEN8_PIPE_FIFO_UNDERRUN; 3612 3613 de_port_enables = de_port_masked; 3614 if (IS_GEN9_LP(dev_priv)) 3615 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; 3616 else if (IS_BROADWELL(dev_priv)) 3617 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; 3618 3619 for_each_pipe(dev_priv, pipe) { 3620 dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; 3621 3622 if (intel_display_power_is_enabled(dev_priv, 3623 POWER_DOMAIN_PIPE(pipe))) 3624 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3625 dev_priv->de_irq_mask[pipe], 3626 de_pipe_enables); 3627 } 3628 3629 GEN3_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3630 GEN3_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked); 3631 3632 if (IS_GEN9_LP(dev_priv)) 3633 bxt_hpd_detection_setup(dev_priv); 3634 else if (IS_BROADWELL(dev_priv)) 3635 ilk_hpd_detection_setup(dev_priv); 3636 } 3637 3638 static int gen8_irq_postinstall(struct drm_device *dev) 3639 { 3640 struct drm_i915_private *dev_priv = to_i915(dev); 3641 3642 if (HAS_PCH_SPLIT(dev_priv)) 3643 ibx_irq_pre_postinstall(dev); 3644 3645 gen8_gt_irq_postinstall(dev_priv); 3646 gen8_de_irq_postinstall(dev_priv); 3647 3648 if (HAS_PCH_SPLIT(dev_priv)) 3649 ibx_irq_postinstall(dev); 3650 3651 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3652 POSTING_READ(GEN8_MASTER_IRQ); 3653 3654 return 0; 3655 } 3656 3657 static int cherryview_irq_postinstall(struct drm_device *dev) 3658 { 3659 struct drm_i915_private *dev_priv = to_i915(dev); 3660 3661 gen8_gt_irq_postinstall(dev_priv); 3662 3663 spin_lock_irq(&dev_priv->irq_lock); 3664 if (dev_priv->display_irqs_enabled) 3665 vlv_display_irq_postinstall(dev_priv); 3666 spin_unlock_irq(&dev_priv->irq_lock); 3667 3668 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 3669 POSTING_READ(GEN8_MASTER_IRQ); 3670 3671 return 0; 3672 } 3673 3674 static void i8xx_irq_reset(struct drm_device *dev) 3675 { 3676 struct drm_i915_private *dev_priv = to_i915(dev); 3677 3678 i9xx_pipestat_irq_reset(dev_priv); 3679 3680 I915_WRITE16(HWSTAM, 0xffff); 3681 3682 GEN2_IRQ_RESET(); 3683 } 3684 3685 static int i8xx_irq_postinstall(struct drm_device *dev) 3686 { 3687 struct drm_i915_private *dev_priv = to_i915(dev); 3688 u16 enable_mask; 3689 3690 I915_WRITE16(EMR, ~(I915_ERROR_PAGE_TABLE | 3691 I915_ERROR_MEMORY_REFRESH)); 3692 3693 /* Unmask the interrupts that we always want on. 
*/ 3694 dev_priv->irq_mask = 3695 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3696 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 3697 3698 enable_mask = 3699 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3700 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3701 I915_USER_INTERRUPT; 3702 3703 GEN2_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 3704 3705 /* Interrupt setup is already guaranteed to be single-threaded, this is 3706 * just to make the assert_spin_locked check happy. */ 3707 spin_lock_irq(&dev_priv->irq_lock); 3708 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3709 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3710 spin_unlock_irq(&dev_priv->irq_lock); 3711 3712 return 0; 3713 } 3714 3715 static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3716 { 3717 struct drm_device *dev = arg; 3718 struct drm_i915_private *dev_priv = to_i915(dev); 3719 irqreturn_t ret = IRQ_NONE; 3720 3721 if (!intel_irqs_enabled(dev_priv)) 3722 return IRQ_NONE; 3723 3724 /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 3725 disable_rpm_wakeref_asserts(dev_priv); 3726 3727 do { 3728 u32 pipe_stats[I915_MAX_PIPES] = {}; 3729 u16 iir; 3730 3731 iir = I915_READ16(IIR); 3732 if (iir == 0) 3733 break; 3734 3735 ret = IRQ_HANDLED; 3736 3737 /* Call regardless, as some status bits might not be 3738 * signalled in iir */ 3739 i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); 3740 3741 I915_WRITE16(IIR, iir); 3742 3743 if (iir & I915_USER_INTERRUPT) 3744 notify_ring(dev_priv->engine[RCS]); 3745 3746 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3747 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); 3748 3749 i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); 3750 } while (0); 3751 3752 enable_rpm_wakeref_asserts(dev_priv); 3753 3754 return ret; 3755 } 3756 3757 static void i915_irq_reset(struct drm_device *dev) 3758 { 3759 struct drm_i915_private *dev_priv = to_i915(dev); 3760 3761 if (I915_HAS_HOTPLUG(dev_priv)) { 3762 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 3763 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3764 } 3765 3766 i9xx_pipestat_irq_reset(dev_priv); 3767 3768 I915_WRITE(HWSTAM, 0xffffffff); 3769 3770 GEN3_IRQ_RESET(); 3771 } 3772 3773 static int i915_irq_postinstall(struct drm_device *dev) 3774 { 3775 struct drm_i915_private *dev_priv = to_i915(dev); 3776 u32 enable_mask; 3777 3778 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | 3779 I915_ERROR_MEMORY_REFRESH)); 3780 3781 /* Unmask the interrupts that we always want on. */ 3782 dev_priv->irq_mask = 3783 ~(I915_ASLE_INTERRUPT | 3784 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3785 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 3786 3787 enable_mask = 3788 I915_ASLE_INTERRUPT | 3789 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3790 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3791 I915_USER_INTERRUPT; 3792 3793 if (I915_HAS_HOTPLUG(dev_priv)) { 3794 /* Enable in IER... */ 3795 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3796 /* and unmask in IMR */ 3797 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3798 } 3799 3800 GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask); 3801 3802 /* Interrupt setup is already guaranteed to be single-threaded, this is 3803 * just to make the assert_spin_locked check happy. 
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (I915_HAS_HOTPLUG(dev_priv) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

static void i965_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	i9xx_pipestat_irq_reset(dev_priv);

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN3_IRQ_RESET();
}

static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 enable_mask;
	u32 error_mask;

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev_priv)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PORT_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PORT_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (IS_G4X(dev_priv))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	GEN3_IRQ_INIT(, dev_priv->irq_mask, enable_mask);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	i915_enable_asle_pipestat(dev_priv);

	return 0;
}

static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
{
	u32 hotplug_en;

	lockdep_assert_held(&dev_priv->irq_lock);

	/* Note HDMI and DP share hotplug bits */
	/* enable bits are the same for all generations */
	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
	/* Programming the CRT detection parameters tends
	 * to generate a spurious hotplug event about three
	 * seconds later. So just do it once.
	 */
	if (IS_G4X(dev_priv))
		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

	/* Ignore TV since it's buggy */
	i915_hotplug_interrupt_update_locked(dev_priv,
					     HOTPLUG_INT_EN_MASK |
					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
					     hotplug_en);
}

static irqreturn_t i965_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = to_i915(dev);
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
	disable_rpm_wakeref_asserts(dev_priv);

	do {
		u32 pipe_stats[I915_MAX_PIPES] = {};
		u32 hotplug_status = 0;
		u32 iir;

		iir = I915_READ(IIR);
		if (iir == 0)
			break;

		ret = IRQ_HANDLED;

		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			hotplug_status = i9xx_hpd_irq_ack(dev_priv);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats);

		I915_WRITE(IIR, iir);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev_priv->engine[RCS]);

		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev_priv->engine[VCS]);

		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);

		if (hotplug_status)
			i9xx_hpd_irq_handler(dev_priv, hotplug_status);

		i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
	} while (0);

	enable_rpm_wakeref_asserts(dev_priv);

	return ret;
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_rps *rps = &dev_priv->gt_pm.rps;
	int i;

	intel_hpd_init_work(dev_priv);

	INIT_WORK(&rps->work, gen6_pm_rps_work);

	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
	for (i = 0; i < MAX_L3_SLICES; ++i)
		dev_priv->l3_parity.remap_info[i] = NULL;

	if (HAS_GUC_SCHED(dev_priv))
		dev_priv->pm_guc_events = GEN9_GUC_TO_HOST_INT_EVENT;

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	rps->pm_intrmsk_mbz = 0;

	/*
	 * SNB, IVB and HSW can hard hang, and VLV and CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_GEN(dev_priv) <= 7)
		rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_GEN(dev_priv) >= 8)
		rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;

	if (IS_GEN2(dev_priv)) {
		/* Gen2 doesn't have a hardware frame counter */
		dev->max_vblank_count = 0;
	} else if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	/* Most platforms treat the display irq block as an always-on
	 * power domain. vlv/chv can disable it at runtime and need
	 * special care to avoid writing any of the display block registers
	 * outside of the power domain. We defer setting up the display irqs
	 * in this case to the runtime pm.
	 */
	dev_priv->display_irqs_enabled = true;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display_irqs_enabled = false;

	dev_priv->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD;

	dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_reset;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_reset;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_reset;
		dev->driver->enable_vblank = i965_enable_vblank;
		dev->driver->disable_vblank = i965_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_GEN(dev_priv) >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_reset;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		if (IS_GEN9_LP(dev_priv))
			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
		else if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv) ||
			 HAS_PCH_CNP(dev_priv))
			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
		else
			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_reset;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
	} else {
		if (IS_GEN2(dev_priv)) {
			dev->driver->irq_preinstall = i8xx_irq_reset;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_reset;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else if (IS_GEN3(dev_priv)) {
			dev->driver->irq_preinstall = i915_irq_reset;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_reset;
			dev->driver->irq_handler = i915_irq_handler;
			dev->driver->enable_vblank = i8xx_enable_vblank;
			dev->driver->disable_vblank = i8xx_disable_vblank;
		} else {
			dev->driver->irq_preinstall = i965_irq_reset;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_reset;
			dev->driver->irq_handler = i965_irq_handler;
			dev->driver->enable_vblank = i965_enable_vblank;
			dev->driver->disable_vblank = i965_disable_vblank;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	}
}
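
/*
 * Illustrative sketch, not built as part of the driver: the split implied by
 * the kerneldoc above and below. intel_irq_init() only fills in the vtable,
 * work items and timers; the interrupt itself is requested later through
 * intel_irq_install(). example_irq_bringup() is a hypothetical wrapper used
 * purely for illustration; the real call sites live in the driver load path.
 */
static inline int example_irq_bringup(struct drm_i915_private *dev_priv)
{
	/* Pick the handlers and vtable entries for this platform; no
	 * hardware access happens here. */
	intel_irq_init(dev_priv);

	/* Actually request the IRQ; this runs the irq_preinstall and
	 * irq_postinstall hooks selected above via the DRM core. */
	return intel_irq_install(dev_priv);
}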

/**
 * intel_irq_fini - deinitializes IRQ support
 * @i915: i915 device instance
 *
 * This function deinitializes all the IRQ support.
 */
void intel_irq_fini(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < MAX_L3_SLICES; ++i)
		kfree(i915->l3_parity.remap_info[i]);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug handling disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few
 * places but don't want to deal with the hassle of concurrent probe and
 * hotplug workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->runtime_pm.irqs_enabled = true;

	return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
}

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(&dev_priv->drm);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->runtime_pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
	dev_priv->runtime_pm.irqs_enabled = false;
	synchronize_irq(dev_priv->drm.irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->runtime_pm.irqs_enabled = true;
	dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
	dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
}
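
/*
 * Illustrative sketch, not built as part of the driver: how the helpers above
 * are meant to pair up, based on their kerneldoc. The function names
 * (example_runtime_pm_cycle, example_irq_teardown) are hypothetical and exist
 * only for illustration; the real callers are the runtime-pm, system
 * suspend/resume and unload paths elsewhere in the driver.
 */
static inline void example_runtime_pm_cycle(struct drm_i915_private *dev_priv)
{
	/* Going to a low power state: reset the interrupt registers and wait
	 * for any handler still in flight before the device powers down. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ...device is powered down and later brought back up here... */

	/* Back up: rerun the preinstall/postinstall hooks to reprogram the
	 * interrupt registers from scratch. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}

static inline void example_irq_teardown(struct drm_i915_private *dev_priv)
{
	/* Release the IRQ and stop hotplug handling first... */
	intel_irq_uninstall(dev_priv);

	/* ...then free the remaining IRQ-related allocations. */
	intel_irq_fini(dev_priv);
}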