1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /* 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * Hotplug interrupt bit tables, indexed by hpd pin (HPD_*).
 *
 * hpd_ibx/hpd_cpt: south display engine (PCH) hotplug bits for IBX and
 * CPT/PPT PCHs respectively (the bit layout moved between the two).
 * hpd_mask_i915: hotplug interrupt *enable* bits for gen2-4.
 * hpd_status_g4x/hpd_status_i915: hotplug interrupt *status* bits; the
 * SDVO status bits differ between g4x and i915-class hardware.
 */
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */

/*
 * Unmask @mask bits in DEIMR (display engine interrupt mask register),
 * enabling those display interrupts.  Caller must hold irq_lock.
 */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* While in PC8 the real register is unavailable: update only the
	 * shadow copy that gets restored when PC8 is left. */
	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	/* Skip the MMIO write when no requested bit is currently masked. */
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);	/* flush the write to hardware */
	}
}

/*
 * Mask @mask bits in DEIMR, disabling those display interrupts.
 * Caller must hold irq_lock.
 */
static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* PC8: only update the saved register image (see enable above). */
	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	/* Skip the MMIO write when all requested bits are already masked. */
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* PC8: apply the update to the shadow GTIMR image only. */
	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	/* Within interrupt_mask: clear bits to enable, set bits to mask. */
	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

/* Enable @mask GT interrupts in GTIMR.  Caller holds irq_lock. */
void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

/* Disable @mask GT interrupts in GTIMR.  Caller holds irq_lock. */
void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	/* PC8: apply the update to the shadow PMIMR image only. */
	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	/* Compute the new mask first so an unchanged value skips MMIO. */
	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

/* Enable @mask PM interrupts in GEN6_PMIMR.  Caller holds irq_lock. */
void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

/* Disable @mask PM interrupts in GEN6_PMIMR.  Caller holds irq_lock. */
void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

/*
 * On IVB/HSW there is a single shared error-interrupt enable bit, so the
 * error interrupt may only be enabled when no pipe has CPU fifo underrun
 * reporting disabled.  Returns true if it is safe to enable DE_ERR_INT_IVB.
 * Caller must hold irq_lock.
 */
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum i915_pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/*
 * Same idea for the CPT/PPT south error interrupt (SERR): it may only be
 * enabled when no transcoder has PCH fifo underrun reporting disabled.
 * Caller must hold irq_lock.
 */
static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/*
 * ILK/SNB have a dedicated per-pipe fifo underrun interrupt enable bit in
 * DEIMR; simply toggle it.
 */
static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ?
					DE_PIPEA_FIFO_UNDERRUN :
					DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

/*
 * IVB/HSW share one error-interrupt enable bit (DE_ERR_INT_IVB) for all
 * pipes; the per-pipe underrun status lives in GEN7_ERR_INT.  Enabling
 * first clears any stale status, and only unmasks the shared bit when no
 * other pipe has reporting disabled.
 */
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		/* Clear any stale underrun status for this pipe first. */
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		/* Report an underrun that raced with us disabling the
		 * interrupt, so it is not silently lost. */
		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/*
 * BDW has a per-pipe interrupt mask register; toggle the fifo underrun
 * bit in the cached mask and write it out.  Caller must hold irq_lock.
 */
static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	/* PC8: only the hotplug bits are tracked in the shadow image;
	 * NOTE(review): updates to other SDEIMR bits still fall through to
	 * the MMIO write below even in PC8 — confirm intended. */
	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

/*
 * IBX has a dedicated per-transcoder fifo underrun enable bit in SDEIMR.
 */
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

/*
 * CPT/PPT share one south error enable bit (SDE_ERROR_CPT); per-transcoder
 * underrun status lives in SERR_INT.  Mirrors the IVB scheme above.
 */
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		/* Clear stale underrun status for this transcoder first. */
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one.
		 */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		/* Report an underrun that raced with us disabling the
		 * interrupt, so it is not silently lost. */
		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	/* Previous state; nothing to do when it already matches. */
	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	/* Dispatch to the per-generation implementation. */
	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	/* Previous state; nothing to do when it already matches. */
	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	return ret;
}


/*
 * Enable the PIPESTAT event(s) in @mask for @pipe and clear any pending
 * status for them.  PIPESTAT packs enable bits in the high half and the
 * corresponding status bits 16 positions lower, hence the (mask >> 16).
 * Caller must hold irq_lock.
 */
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum i915_pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	/* Keep only the enable bits; writing 1 to status bits clears them. */
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/*
 * Disable the PIPESTAT event(s) in @mask for @pipe.
 * Caller must hold irq_lock.
 */
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum i915_pipe pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* ASLE backlight events only matter with OpRegion on mobile parts. */
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		/* UMS: trust the hardware enable bit directly. */
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Compute vblank start in pixels, from mode data (KMS) or from
	 * the hardware timing registers (UMS). */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;
		u32 htotal;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;

		vbl_start *= htotal;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

/* g4x+ has a dedicated 32-bit hardware frame counter register. */
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc.
 */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

/*
 * Check the ISR vblank status bit for @pipe.  Only reliable on PCH-split
 * platforms; callers must hold uncore.lock (raw register read).
 */
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t status;
	int reg;

	if (INTEL_INFO(dev)->gen >= 8) {
		status = GEN8_PIPE_VBLANK;
		reg = GEN8_DE_PIPE_ISR(pipe);
	} else if (INTEL_INFO(dev)->gen >= 7) {
		status = DE_PIPE_VBLANK_IVB(pipe);
		reg = DEISR;
	} else {
		status = DE_PIPE_VBLANK(pipe);
		reg = DEISR;
	}

	return __raw_i915_read32(dev_priv, reg) & status;
}

/*
 * Query the current scanout position of @pipe for vblank timestamping.
 * On return, *vpos/*hpos hold the position relative to vblank end:
 * negative while inside vblank, counting up to 0 at vbl_end.  The return
 * value is a DRM_SCANOUTPOS_* flag mask (0 when the pipe is disabled).
 */
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	/* Interlaced modes scan out half the lines per field. */
	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query.
	 * (not wired up in this port) */
#if 0
	if (stime)
		*stime = ktime_get();
#endif

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		if (IS_GEN2(dev))
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
		else
			position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

		if (HAS_DDI(dev)) {
			/*
			 * On HSW HDMI outputs there seems to be a 2 line
			 * difference, whereas eDP has the normal 1 line
			 * difference that earlier platforms have. External
			 * DP is unknown. For now just check for the 2 line
			 * difference case on all output types on HSW+.
			 *
			 * This might misinterpret the scanline counter being
			 * one line too far along on eDP, but that's less
			 * dangerous than the alternative since that would lead
			 * the vblank timestamp code astray when it sees a
			 * scanline count before vblank_start during a vblank
			 * interrupt.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && (position == vbl_start - 2 ||
					position == vbl_start - 1)) ||
			    (!in_vbl && (position == vbl_end - 2 ||
					 position == vbl_end - 1)))
				position = (position + 2) % vtotal;
		} else if (HAS_PCH_SPLIT(dev)) {
			/*
			 * The scanline counter increments at the leading edge
			 * of hsync, ie. it completely misses the active portion
			 * of the line. Fix up the counter at both edges of vblank
			 * to get a more accurate picture whether we're in vblank
			 * or not.
			 */
			in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
			if ((in_vbl && position == vbl_start - 1) ||
			    (!in_vbl && position == vbl_end - 1))
				position = (position + 1) % vtotal;
		} else {
			/*
			 * ISR vblank status bits don't work the way we'd want
			 * them to work on non-PCH platforms (for
			 * ilk_pipe_in_vblank_locked()), and there doesn't
			 * appear any other way to determine if we're currently
			 * in vblank.
			 *
			 * Instead let's assume that we're already in vblank if
			 * we got called from the vblank interrupt and the
			 * scanline counter value indicates that we're on the
			 * line just prior to vblank start. This should result
			 * in the correct answer, unless the vblank interrupt
			 * delivery really got delayed for almost exactly one
			 * full frame/field.
			 */
#if 0
			if (flags & DRM_CALLED_FROM_VBLIRQ &&
			    position == vbl_start - 1) {
				position = (position + 1) % vtotal;

				/* Signal this correction as "applied". */
				ret |= 0x8;
			}
#endif
		}
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;
	}

	/* Get optional system timestamp after query. */
#if 0
	if (etime)
		*etime = ktime_get();
#endif

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* Line-granular counter: no horizontal position available. */
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

/*
 * DRM vblank-timestamp hook: validate @pipe and its crtc, then delegate
 * to the DRM core helper which uses i915_get_crtc_scanoutpos().
 */
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

/*
 * Re-detect @connector and report whether its status changed.
 * Caller must hold mode_config.mutex.
 */
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
852 */ 853 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) 854 855 static void i915_hotplug_work_func(struct work_struct *work) 856 { 857 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 858 hotplug_work); 859 struct drm_device *dev = dev_priv->dev; 860 struct drm_mode_config *mode_config = &dev->mode_config; 861 struct intel_connector *intel_connector; 862 struct intel_encoder *intel_encoder; 863 struct drm_connector *connector; 864 bool hpd_disabled = false; 865 bool changed = false; 866 u32 hpd_event_bits; 867 868 /* HPD irq before everything is fully set up. */ 869 if (!dev_priv->enable_hotplug_processing) 870 return; 871 872 mutex_lock(&mode_config->mutex); 873 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 874 875 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 876 877 hpd_event_bits = dev_priv->hpd_event_bits; 878 dev_priv->hpd_event_bits = 0; 879 list_for_each_entry(connector, &mode_config->connector_list, head) { 880 intel_connector = to_intel_connector(connector); 881 intel_encoder = intel_connector->encoder; 882 if (intel_encoder->hpd_pin > HPD_NONE && 883 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && 884 connector->polled == DRM_CONNECTOR_POLL_HPD) { 885 DRM_INFO("HPD interrupt storm detected on connector %s: " 886 "switching from hotplug detection to polling\n", 887 drm_get_connector_name(connector)); 888 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; 889 connector->polled = DRM_CONNECTOR_POLL_CONNECT 890 | DRM_CONNECTOR_POLL_DISCONNECT; 891 hpd_disabled = true; 892 } 893 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 894 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", 895 drm_get_connector_name(connector), intel_encoder->hpd_pin); 896 } 897 } 898 /* if there were no outputs to poll, poll was disabled, 899 * therefore make sure it's enabled when disabling HPD on 900 * some connectors */ 901 if (hpd_disabled) { 902 drm_kms_helper_poll_enable(dev); 903 
mod_timer(&dev_priv->hotplug_reenable_timer, 904 jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 905 } 906 907 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 908 909 list_for_each_entry(connector, &mode_config->connector_list, head) { 910 intel_connector = to_intel_connector(connector); 911 intel_encoder = intel_connector->encoder; 912 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { 913 if (intel_encoder->hot_plug) 914 intel_encoder->hot_plug(intel_encoder); 915 if (intel_hpd_irq_event(dev, connector)) 916 changed = true; 917 } 918 } 919 mutex_unlock(&mode_config->mutex); 920 921 if (changed) 922 drm_kms_helper_hotplug_event(dev); 923 } 924 925 static void ironlake_rps_change_irq_handler(struct drm_device *dev) 926 { 927 drm_i915_private_t *dev_priv = dev->dev_private; 928 u32 busy_up, busy_down, max_avg, min_avg; 929 u8 new_delay; 930 931 lockmgr(&mchdev_lock, LK_EXCLUSIVE); 932 933 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); 934 935 new_delay = dev_priv->ips.cur_delay; 936 937 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); 938 busy_up = I915_READ(RCPREVBSYTUPAVG); 939 busy_down = I915_READ(RCPREVBSYTDNAVG); 940 max_avg = I915_READ(RCBMAXAVG); 941 min_avg = I915_READ(RCBMINAVG); 942 943 /* Handle RCS change request from hw */ 944 if (busy_up > max_avg) { 945 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) 946 new_delay = dev_priv->ips.cur_delay - 1; 947 if (new_delay < dev_priv->ips.max_delay) 948 new_delay = dev_priv->ips.max_delay; 949 } else if (busy_down < min_avg) { 950 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) 951 new_delay = dev_priv->ips.cur_delay + 1; 952 if (new_delay > dev_priv->ips.min_delay) 953 new_delay = dev_priv->ips.min_delay; 954 } 955 956 if (ironlake_set_drps(dev, new_delay)) 957 dev_priv->ips.cur_delay = new_delay; 958 959 lockmgr(&mchdev_lock, LK_RELEASE); 960 961 return; 962 } 963 964 static void notify_ring(struct drm_device *dev, 965 struct intel_ring_buffer *ring) 966 { 967 if (ring->obj == NULL) 968 
return; 969 970 trace_i915_gem_request_complete(ring); 971 972 wake_up_all(&ring->irq_queue); 973 i915_queue_hangcheck(dev); 974 } 975 976 static void gen6_pm_rps_work(struct work_struct *work) 977 { 978 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 979 rps.work); 980 u32 pm_iir; 981 int new_delay, adj; 982 983 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 984 pm_iir = dev_priv->rps.pm_iir; 985 dev_priv->rps.pm_iir = 0; 986 /* Make sure not to corrupt PMIMR state used by ringbuffer code */ 987 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 988 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 989 990 /* Make sure we didn't queue anything we're not going to process. */ 991 WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS); 992 993 if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) 994 return; 995 996 mutex_lock(&dev_priv->rps.hw_lock); 997 998 adj = dev_priv->rps.last_adj; 999 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { 1000 if (adj > 0) 1001 adj *= 2; 1002 else 1003 adj = 1; 1004 new_delay = dev_priv->rps.cur_delay + adj; 1005 1006 /* 1007 * For better performance, jump directly 1008 * to RPe if we're below it. 
1009 */ 1010 if (new_delay < dev_priv->rps.rpe_delay) 1011 new_delay = dev_priv->rps.rpe_delay; 1012 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { 1013 if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay) 1014 new_delay = dev_priv->rps.rpe_delay; 1015 else 1016 new_delay = dev_priv->rps.min_delay; 1017 adj = 0; 1018 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { 1019 if (adj < 0) 1020 adj *= 2; 1021 else 1022 adj = -1; 1023 new_delay = dev_priv->rps.cur_delay + adj; 1024 } else { /* unknown event */ 1025 new_delay = dev_priv->rps.cur_delay; 1026 } 1027 1028 /* sysfs frequency interfaces may have snuck in while servicing the 1029 * interrupt 1030 */ 1031 new_delay = clamp_t(int, new_delay, 1032 dev_priv->rps.min_delay, dev_priv->rps.max_delay); 1033 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay; 1034 1035 if (IS_VALLEYVIEW(dev_priv->dev)) 1036 valleyview_set_rps(dev_priv->dev, new_delay); 1037 else 1038 gen6_set_rps(dev_priv->dev, new_delay); 1039 1040 mutex_unlock(&dev_priv->rps.hw_lock); 1041 } 1042 1043 1044 /** 1045 * ivybridge_parity_work - Workqueue called when a parity error interrupt 1046 * occurred. 1047 * @work: workqueue struct 1048 * 1049 * Doesn't actually do anything except notify userspace. As a consequence of 1050 * this event, userspace should try to remap the bad rows since statistically 1051 * it is likely the same row is more likely to go bad again. 1052 */ 1053 static void ivybridge_parity_work(struct work_struct *work) 1054 { 1055 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, 1056 l3_parity.error_work); 1057 u32 error_status, row, bank, subbank; 1058 char *parity_event[6]; 1059 uint32_t misccpctl; 1060 uint8_t slice = 0; 1061 1062 /* We must turn off DOP level clock gating to access the L3 registers. 1063 * In order to prevent a get/put style interface, acquire struct mutex 1064 * any time we access those registers. 
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	/* Drain every slice that the irq handler flagged as faulted. */
	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		/* Ack the error and re-enable error reporting for this slice. */
		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[5] = NULL;

		/* Userspace uevent notification is not wired up in this port. */
#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

#if 0
		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
#endif
	}

	/* Restore DOP clock gating exactly as we found it. */
	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

/*
 * Top-half for L3 parity errors: mask the interrupt source, record which
 * slice(s) faulted and defer the heavy register access to
 * ivybridge_parity_work (the workqueue re-enables the irq when done).
 */
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

/* ILK/SNB-era GT interrupts: wake waiters on the rings that signalled. */
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

/* SNB+ GT interrupts: ring wakeups plus CS error and L3 parity handling. */
static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	/* Command stream errors escalate to the full error handler. */
	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

/* BDW: fan the master control bits out to the per-engine GT IIR registers. */
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs &
			    GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & GEN8_GT_VCS1_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

}

/* HPD storm detection: window (ms) and interrupt count threshold. */
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

/*
 * Shared hotplug handler: @hotplug_trigger holds the raw HPD status bits,
 * @hpd maps pin index -> trigger bit for the current platform (hpd_ibx,
 * hpd_cpt, hpd_status_i915, ...). Detects interrupt storms per pin and
 * defers the actual probing to the hotplug work item.
 */
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	/* Pin 0 (HPD_NONE) is skipped deliberately. */
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN_ONCE(hpd[i] & hotplug_trigger &&
			  dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
			  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
			  hotplug_trigger, i, hpd[i]);

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			/* Outside the detection window: restart the count. */
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			/* Storm: disable this pin and drop its pending event. */
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

/* GMBUS transfer completed: wake anyone blocked in the GMBUS code. */
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* DP AUX done: DP AUX waiters share the GMBUS wait queue in this driver. */
static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
/*
 * Push one CRC sample into the pipe's ring buffer for the debugfs CRC
 * interface; drops the sample (with an error) if the buffer is full or
 * was never allocated.
 */
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	/* Tag the sample with the current frame counter. */
	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	/* INTEL_PIPE_CRC_ENTRIES_NR is a power of two; mask wraps the index. */
	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif


/* HSW+: only one CRC result register per pipe; pad the rest with zeros. */
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

/* IVB: five CRC result registers per pipe. */
static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

/* Pre-IVB: RES1/RES2 registers only exist on newer/G4X parts. */
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	/* VEBOX user interrupts are delivered via PM IIR on HSW. */
	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

/* Top-level interrupt handler for ValleyView. */
static irqreturn_t valleyview_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	/* Loop until all three IIR registers read back empty. */
	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;


		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT
regs before the IIR 1447 */ 1448 if (pipe_stats[pipe] & 0x8000ffff) { 1449 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1450 DRM_DEBUG_DRIVER("pipe %c underrun\n", 1451 pipe_name(pipe)); 1452 I915_WRITE(reg, pipe_stats[pipe]); 1453 } 1454 } 1455 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1456 1457 for_each_pipe(pipe) { 1458 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) 1459 drm_handle_vblank(dev, pipe); 1460 1461 if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { 1462 intel_prepare_page_flip(dev, pipe); 1463 intel_finish_page_flip(dev, pipe); 1464 } 1465 1466 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1467 i9xx_pipe_crc_irq_handler(dev, pipe); 1468 } 1469 1470 /* Consume port. Then clear IIR or we'll miss events */ 1471 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 1472 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 1473 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1474 1475 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 1476 hotplug_status); 1477 1478 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 1479 1480 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1481 dp_aux_irq_handler(dev); 1482 1483 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 1484 I915_READ(PORT_HOTPLUG_STAT); 1485 } 1486 1487 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1488 gmbus_irq_handler(dev); 1489 1490 if (pm_iir) 1491 gen6_rps_irq_handler(dev_priv, pm_iir); 1492 1493 I915_WRITE(GTIIR, gt_iir); 1494 I915_WRITE(GEN6_PMIIR, pm_iir); 1495 I915_WRITE(VLV_IIR, iir); 1496 } 1497 1498 out: 1499 return; 1500 } 1501 1502 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1503 { 1504 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1505 int pipe; 1506 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1507 1508 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1509 1510 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1511 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1512 
SDE_AUDIO_POWER_SHIFT); 1513 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 1514 port_name(port)); 1515 } 1516 1517 if (pch_iir & SDE_AUX_MASK) 1518 dp_aux_irq_handler(dev); 1519 1520 if (pch_iir & SDE_GMBUS) 1521 gmbus_irq_handler(dev); 1522 1523 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1524 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1525 1526 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1527 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1528 1529 if (pch_iir & SDE_POISON) 1530 DRM_ERROR("PCH poison interrupt\n"); 1531 1532 if (pch_iir & SDE_FDI_MASK) 1533 for_each_pipe(pipe) 1534 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1535 pipe_name(pipe), 1536 I915_READ(FDI_RX_IIR(pipe))); 1537 1538 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1539 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1540 1541 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1542 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1543 1544 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1545 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1546 false)) 1547 DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); 1548 1549 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1550 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1551 false)) 1552 DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); 1553 } 1554 1555 static void ivb_err_int_handler(struct drm_device *dev) 1556 { 1557 struct drm_i915_private *dev_priv = dev->dev_private; 1558 u32 err_int = I915_READ(GEN7_ERR_INT); 1559 enum i915_pipe pipe; 1560 1561 if (err_int & ERR_INT_POISON) 1562 DRM_ERROR("Poison interrupt\n"); 1563 1564 for_each_pipe(pipe) { 1565 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1566 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1567 false)) 1568 DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n", 1569 pipe_name(pipe)); 1570 } 1571 1572 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1573 if (IS_IVYBRIDGE(dev)) 1574 ivb_pipe_crc_irq_handler(dev, pipe); 1575 else 1576 
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	/* Ack all error bits we just processed. */
	I915_WRITE(GEN7_ERR_INT, err_int);
}

/* CPT SERR_INT decoding: PCH transcoder FIFO underruns and poison. */
static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

/* CougarPoint/PantherPoint PCH interrupt decoding. */
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

/* ILK/SNB display (DE) interrupt decoding. */
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe))
			drm_handle_vblank(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

/* IVB/HSW display (DE) interrupt decoding. */
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe i;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(i) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(i)))
			drm_handle_vblank(dev, i);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(i)) {
			intel_prepare_page_flip(dev, i);
			intel_finish_page_flip_plane(dev, i);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}

/* Top-level interrupt handler for ILK through HSW (PCH-split platforms). */
static irqreturn_t ironlake_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir  */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue).
	 */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
		I915_WRITE(DEIIR, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			gen6_rps_irq_handler(dev_priv, pm_iir);
			I915_WRITE(GEN6_PMIIR, pm_iir);
		}
	}

	/* Re-enable the master and south interrupts we masked on entry. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

}

/* Top-level interrupt handler for Broadwell (GEN8). */
static irqreturn_t gen8_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	uint32_t tmp = 0;
	enum i915_pipe pipe;

	atomic_inc(&dev_priv->irq_received);

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return;

	/* Disable the master interrupt while we service the sub-sources. */
	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp & GEN8_DE_MISC_GSE)
			intel_opregion_asle_intr(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Misc interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
		}
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp &
		    GEN8_AUX_CHANNEL_A)
			dp_aux_irq_handler(dev);
		else if (tmp)
			DRM_ERROR("Unexpected DE Port interrupt\n");
		else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");

		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
		}
	}

	for_each_pipe(pipe) {
		uint32_t pipe_iir;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir & GEN8_PIPE_VBLANK)
			drm_handle_vblank(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FLIP_DONE) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}

		if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
			hsw_pipe_crc_irq_handler(dev, pipe);

		if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
			if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
								  false))
				DRM_DEBUG_DRIVER("Pipe %c FIFO underrun\n",
						 pipe_name(pipe));
		}

		if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
				  pipe_name(pipe),
				  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
		}

		if (pipe_iir) {
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
		}
	}

	/* Re-arm the master interrupt. */
	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

}

/*
 * Wake every queue a GPU waiter might be blocked on so that waiters
 * re-check the reset state; also wakes the reset_queue once a reset has
 * actually completed.
 */
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_ring_buffer *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
						    gpu_error);
	struct drm_device *dev = dev_priv->dev;
#if 0
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
#endif
	int ret;

	/* uevent notification is not wired up in this port. */
	/* kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); */

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
#if 0
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);
#endif

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
1983 * 1984 * Since unlock operations are a one-sided barrier only, 1985 * we need to insert a barrier here to order any seqno 1986 * updates before 1987 * the counter increment. 1988 */ 1989 cpu_sfence(); 1990 atomic_inc(&dev_priv->gpu_error.reset_counter); 1991 1992 #if 0 1993 kobject_uevent_env(&dev->primary->kdev->kobj, 1994 KOBJ_CHANGE, reset_done_event); 1995 #endif 1996 } else { 1997 atomic_set_mask(I915_WEDGED, &error->reset_counter); 1998 } 1999 2000 /* 2001 * Note: The wake_up also serves as a memory barrier so that 2002 * waiters see the update value of the reset counter atomic_t. 2003 */ 2004 i915_error_wake_up(dev_priv, true); 2005 } 2006 } 2007 2008 static void i915_report_and_clear_eir(struct drm_device *dev) 2009 { 2010 struct drm_i915_private *dev_priv = dev->dev_private; 2011 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2012 u32 eir = I915_READ(EIR); 2013 int pipe, i; 2014 2015 if (!eir) 2016 return; 2017 2018 pr_err("render error detected, EIR: 0x%08x\n", eir); 2019 2020 #if 0 2021 i915_get_extra_instdone(dev, instdone); 2022 #endif 2023 2024 if (IS_G4X(dev)) { 2025 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2026 u32 ipeir = I915_READ(IPEIR_I965); 2027 2028 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2029 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2030 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2031 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2032 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2033 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2034 I915_WRITE(IPEIR_I965, ipeir); 2035 POSTING_READ(IPEIR_I965); 2036 } 2037 if (eir & GM45_ERROR_PAGE_TABLE) { 2038 u32 pgtbl_err = I915_READ(PGTBL_ER); 2039 pr_err("page table error\n"); 2040 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2041 I915_WRITE(PGTBL_ER, pgtbl_err); 2042 POSTING_READ(PGTBL_ER); 2043 } 2044 } 2045 2046 if (!IS_GEN2(dev)) { 2047 if (eir & I915_ERROR_PAGE_TABLE) { 2048 u32 pgtbl_err = I915_READ(PGTBL_ER); 2049 pr_err("page table error\n"); 2050 
			pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
		/*
		 * NOTE(review): instdone is only filled by
		 * i915_get_extra_instdone(), which is compiled out (#if 0)
		 * above — these values may not reflect real hardware state.
		 */
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Error-state capture is not wired up in this port. */
#if 0
	i915_capture_error_state(dev);
#endif
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pagelips). Hence it must not be run on our own
	 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}

/*
 * Heuristic check for a missed flip-done interrupt: if the scanout base
 * already points at the pending flip's object, assume the flip happened
 * and complete it. (Currently unused, hence __always_unused.)
 */
static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	if (work == NULL ||
	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
					i915_gem_obj_ggtt_offset(obj);
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	lockmgr(&dev->event_lock, LK_RELEASE);

	if (stall_detected) {
		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	lockmgr(&dev_priv->irq_lock,
LK_EXCLUSIVE); 2206 if (INTEL_INFO(dev)->gen >= 4) 2207 i915_enable_pipestat(dev_priv, pipe, 2208 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2209 else 2210 i915_enable_pipestat(dev_priv, pipe, 2211 PIPE_VBLANK_INTERRUPT_ENABLE); 2212 2213 /* maintain vblank delivery even in deep C-states */ 2214 if (dev_priv->info->gen == 3) 2215 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); 2216 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2217 2218 return 0; 2219 } 2220 2221 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2222 { 2223 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2224 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2225 DE_PIPE_VBLANK(pipe); 2226 2227 if (!i915_pipe_enabled(dev, pipe)) 2228 return -EINVAL; 2229 2230 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2231 ironlake_enable_display_irq(dev_priv, bit); 2232 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2233 2234 return 0; 2235 } 2236 2237 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2238 { 2239 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2240 u32 imr; 2241 2242 if (!i915_pipe_enabled(dev, pipe)) 2243 return -EINVAL; 2244 2245 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2246 imr = I915_READ(VLV_IMR); 2247 if (pipe == PIPE_A) 2248 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2249 else 2250 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2251 I915_WRITE(VLV_IMR, imr); 2252 i915_enable_pipestat(dev_priv, pipe, 2253 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2254 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2255 2256 return 0; 2257 } 2258 2259 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2260 { 2261 struct drm_i915_private *dev_priv = dev->dev_private; 2262 2263 if (!i915_pipe_enabled(dev, pipe)) 2264 return -EINVAL; 2265 2266 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2267 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2268 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), 
dev_priv->de_irq_mask[pipe]); 2269 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2270 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2271 return 0; 2272 } 2273 2274 /* Called from drm generic code, passed 'crtc' which 2275 * we use as a pipe index 2276 */ 2277 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2278 { 2279 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2280 2281 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2282 if (dev_priv->info->gen == 3) 2283 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); 2284 2285 i915_disable_pipestat(dev_priv, pipe, 2286 PIPE_VBLANK_INTERRUPT_ENABLE | 2287 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2288 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2289 } 2290 2291 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2292 { 2293 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2294 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2295 DE_PIPE_VBLANK(pipe); 2296 2297 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2298 ironlake_disable_display_irq(dev_priv, bit); 2299 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2300 } 2301 2302 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2303 { 2304 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2305 u32 imr; 2306 2307 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2308 i915_disable_pipestat(dev_priv, pipe, 2309 PIPE_START_VBLANK_INTERRUPT_ENABLE); 2310 imr = I915_READ(VLV_IMR); 2311 if (pipe == PIPE_A) 2312 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 2313 else 2314 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2315 I915_WRITE(VLV_IMR, imr); 2316 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2317 } 2318 2319 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2320 { 2321 struct drm_i915_private *dev_priv = dev->dev_private; 2322 2323 if (!i915_pipe_enabled(dev, pipe)) 2324 return; 2325 2326 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2327 
dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2328 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2329 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2330 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2331 } 2332 2333 static u32 2334 ring_last_seqno(struct intel_ring_buffer *ring) 2335 { 2336 return list_entry(ring->request_list.prev, 2337 struct drm_i915_gem_request, list)->seqno; 2338 } 2339 2340 static bool 2341 ring_idle(struct intel_ring_buffer *ring, u32 seqno) 2342 { 2343 return (list_empty(&ring->request_list) || 2344 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2345 } 2346 2347 static struct intel_ring_buffer * 2348 semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) 2349 { 2350 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2351 u32 cmd, ipehr, acthd, acthd_min; 2352 2353 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2354 if ((ipehr & ~(0x3 << 16)) != 2355 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) 2356 return NULL; 2357 2358 /* ACTHD is likely pointing to the dword after the actual command, 2359 * so scan backwards until we find the MBOX. 
2360 */ 2361 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; 2362 acthd_min = max((int)acthd - 3 * 4, 0); 2363 do { 2364 cmd = ioread32(ring->virtual_start + acthd); 2365 if (cmd == ipehr) 2366 break; 2367 2368 acthd -= 4; 2369 if (acthd < acthd_min) 2370 return NULL; 2371 } while (1); 2372 2373 *seqno = ioread32(ring->virtual_start+acthd+4)+1; 2374 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; 2375 } 2376 2377 static int semaphore_passed(struct intel_ring_buffer *ring) 2378 { 2379 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2380 struct intel_ring_buffer *signaller; 2381 u32 seqno, ctl; 2382 2383 ring->hangcheck.deadlock = true; 2384 2385 signaller = semaphore_waits_for(ring, &seqno); 2386 if (signaller == NULL || signaller->hangcheck.deadlock) 2387 return -1; 2388 2389 /* cursory check for an unkickable deadlock */ 2390 ctl = I915_READ_CTL(signaller); 2391 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) 2392 return -1; 2393 2394 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); 2395 } 2396 2397 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2398 { 2399 struct intel_ring_buffer *ring; 2400 int i; 2401 2402 for_each_ring(ring, dev_priv, i) 2403 ring->hangcheck.deadlock = false; 2404 } 2405 2406 static enum intel_ring_hangcheck_action 2407 ring_stuck(struct intel_ring_buffer *ring, u32 acthd) 2408 { 2409 struct drm_device *dev = ring->dev; 2410 struct drm_i915_private *dev_priv = dev->dev_private; 2411 u32 tmp; 2412 2413 if (ring->hangcheck.acthd != acthd) 2414 return HANGCHECK_ACTIVE; 2415 2416 if (IS_GEN2(dev)) 2417 return HANGCHECK_HUNG; 2418 2419 /* Is the chip hanging on a WAIT_FOR_EVENT? 2420 * If so we can simply poke the RB_WAIT bit 2421 * and break the hang. This should work on 2422 * all but the second generation chipsets. 
2423 */ 2424 tmp = I915_READ_CTL(ring); 2425 if (tmp & RING_WAIT) { 2426 DRM_ERROR("Kicking stuck wait on %s\n", 2427 ring->name); 2428 i915_handle_error(dev, false); 2429 I915_WRITE_CTL(ring, tmp); 2430 return HANGCHECK_KICK; 2431 } 2432 2433 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2434 switch (semaphore_passed(ring)) { 2435 default: 2436 return HANGCHECK_HUNG; 2437 case 1: 2438 DRM_ERROR("Kicking stuck semaphore on %s\n", 2439 ring->name); 2440 i915_handle_error(dev, false); 2441 I915_WRITE_CTL(ring, tmp); 2442 return HANGCHECK_KICK; 2443 case 0: 2444 return HANGCHECK_WAIT; 2445 } 2446 } 2447 2448 return HANGCHECK_HUNG; 2449 } 2450 2451 /** 2452 * This is called when the chip hasn't reported back with completed 2453 * batchbuffers in a long time. We keep track per ring seqno progress and 2454 * if there are no progress, hangcheck score for that ring is increased. 2455 * Further, acthd is inspected to see if the ring is stuck. On stuck case 2456 * we kick the ring. If we see no progress on three subsequent calls 2457 * we assume chip is wedged and try to fix it by resetting the chip. 
2458 */ 2459 static void i915_hangcheck_elapsed(unsigned long data) 2460 { 2461 struct drm_device *dev = (struct drm_device *)data; 2462 drm_i915_private_t *dev_priv = dev->dev_private; 2463 struct intel_ring_buffer *ring; 2464 int i; 2465 int busy_count = 0, rings_hung = 0; 2466 bool stuck[I915_NUM_RINGS] = { 0 }; 2467 #define BUSY 1 2468 #define KICK 5 2469 #define HUNG 20 2470 #define FIRE 30 2471 2472 if (!i915_enable_hangcheck) 2473 return; 2474 2475 for_each_ring(ring, dev_priv, i) { 2476 u32 seqno, acthd; 2477 bool busy = true; 2478 2479 semaphore_clear_deadlocks(dev_priv); 2480 2481 seqno = ring->get_seqno(ring, false); 2482 acthd = intel_ring_get_active_head(ring); 2483 2484 if (ring->hangcheck.seqno == seqno) { 2485 if (ring_idle(ring, seqno)) { 2486 ring->hangcheck.action = HANGCHECK_IDLE; 2487 2488 if (waitqueue_active(&ring->irq_queue)) { 2489 /* Issue a wake-up to catch stuck h/w. */ 2490 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2491 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2492 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2493 ring->name); 2494 else 2495 DRM_INFO("Fake missed irq on %s\n", 2496 ring->name); 2497 wake_up_all(&ring->irq_queue); 2498 } 2499 /* Safeguard against driver failure */ 2500 ring->hangcheck.score += BUSY; 2501 } else 2502 busy = false; 2503 } else { 2504 /* We always increment the hangcheck score 2505 * if the ring is busy and still processing 2506 * the same request, so that no single request 2507 * can run indefinitely (such as a chain of 2508 * batches). The only time we do not increment 2509 * the hangcheck score on this ring, if this 2510 * ring is in a legitimate wait for another 2511 * ring. In that case the waiting ring is a 2512 * victim and we want to be sure we catch the 2513 * right culprit. 
Then every time we do kick 2514 * the ring, add a small increment to the 2515 * score so that we can catch a batch that is 2516 * being repeatedly kicked and so responsible 2517 * for stalling the machine. 2518 */ 2519 ring->hangcheck.action = ring_stuck(ring, 2520 acthd); 2521 2522 switch (ring->hangcheck.action) { 2523 case HANGCHECK_IDLE: 2524 case HANGCHECK_WAIT: 2525 break; 2526 case HANGCHECK_ACTIVE: 2527 ring->hangcheck.score += BUSY; 2528 break; 2529 case HANGCHECK_KICK: 2530 ring->hangcheck.score += KICK; 2531 break; 2532 case HANGCHECK_HUNG: 2533 ring->hangcheck.score += HUNG; 2534 stuck[i] = true; 2535 break; 2536 } 2537 } 2538 } else { 2539 ring->hangcheck.action = HANGCHECK_ACTIVE; 2540 2541 /* Gradually reduce the count so that we catch DoS 2542 * attempts across multiple batches. 2543 */ 2544 if (ring->hangcheck.score > 0) 2545 ring->hangcheck.score--; 2546 } 2547 2548 ring->hangcheck.seqno = seqno; 2549 ring->hangcheck.acthd = acthd; 2550 busy_count += busy; 2551 } 2552 2553 for_each_ring(ring, dev_priv, i) { 2554 if (ring->hangcheck.score > FIRE) { 2555 DRM_INFO("%s on %s\n", 2556 stuck[i] ? 
"stuck" : "no progress", 2557 ring->name); 2558 rings_hung++; 2559 } 2560 } 2561 2562 if (rings_hung) 2563 return i915_handle_error(dev, true); 2564 2565 if (busy_count) 2566 /* Reset timer case chip hangs without another request 2567 * being added */ 2568 i915_queue_hangcheck(dev); 2569 } 2570 2571 void i915_queue_hangcheck(struct drm_device *dev) 2572 { 2573 struct drm_i915_private *dev_priv = dev->dev_private; 2574 if (!i915_enable_hangcheck) 2575 return; 2576 2577 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 2578 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 2579 } 2580 2581 static void ibx_irq_preinstall(struct drm_device *dev) 2582 { 2583 struct drm_i915_private *dev_priv = dev->dev_private; 2584 2585 if (HAS_PCH_NOP(dev)) 2586 return; 2587 2588 /* south display irq */ 2589 I915_WRITE(SDEIMR, 0xffffffff); 2590 /* 2591 * SDEIER is also touched by the interrupt handler to work around missed 2592 * PCH interrupts. Hence we can't update it after the interrupt handler 2593 * is enabled - instead we unconditionally enable all PCH interrupt 2594 * sources here, but then only unmask them as needed with SDEIMR. 
2595 */ 2596 I915_WRITE(SDEIER, 0xffffffff); 2597 POSTING_READ(SDEIER); 2598 } 2599 2600 static void gen5_gt_irq_preinstall(struct drm_device *dev) 2601 { 2602 struct drm_i915_private *dev_priv = dev->dev_private; 2603 2604 /* and GT */ 2605 I915_WRITE(GTIMR, 0xffffffff); 2606 I915_WRITE(GTIER, 0x0); 2607 POSTING_READ(GTIER); 2608 2609 if (INTEL_INFO(dev)->gen >= 6) { 2610 /* and PM */ 2611 I915_WRITE(GEN6_PMIMR, 0xffffffff); 2612 I915_WRITE(GEN6_PMIER, 0x0); 2613 POSTING_READ(GEN6_PMIER); 2614 } 2615 } 2616 2617 /* drm_dma.h hooks 2618 */ 2619 static void ironlake_irq_preinstall(struct drm_device *dev) 2620 { 2621 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2622 2623 atomic_set(&dev_priv->irq_received, 0); 2624 2625 I915_WRITE(HWSTAM, 0xeffe); 2626 2627 I915_WRITE(DEIMR, 0xffffffff); 2628 I915_WRITE(DEIER, 0x0); 2629 POSTING_READ(DEIER); 2630 2631 gen5_gt_irq_preinstall(dev); 2632 2633 ibx_irq_preinstall(dev); 2634 } 2635 2636 static void valleyview_irq_preinstall(struct drm_device *dev) 2637 { 2638 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2639 int pipe; 2640 2641 atomic_set(&dev_priv->irq_received, 0); 2642 2643 /* VLV magic */ 2644 I915_WRITE(VLV_IMR, 0); 2645 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 2646 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 2647 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 2648 2649 /* and GT */ 2650 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2651 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2652 2653 gen5_gt_irq_preinstall(dev); 2654 2655 I915_WRITE(DPINVGTT, 0xff); 2656 2657 I915_WRITE(PORT_HOTPLUG_EN, 0); 2658 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 2659 for_each_pipe(pipe) 2660 I915_WRITE(PIPESTAT(pipe), 0xffff); 2661 I915_WRITE(VLV_IIR, 0xffffffff); 2662 I915_WRITE(VLV_IMR, 0xffffffff); 2663 I915_WRITE(VLV_IER, 0x0); 2664 POSTING_READ(VLV_IER); 2665 } 2666 2667 static void gen8_irq_preinstall(struct drm_device *dev) 2668 { 2669 struct drm_i915_private *dev_priv = 
dev->dev_private; 2670 int pipe; 2671 2672 atomic_set(&dev_priv->irq_received, 0); 2673 2674 I915_WRITE(GEN8_MASTER_IRQ, 0); 2675 POSTING_READ(GEN8_MASTER_IRQ); 2676 2677 /* IIR can theoretically queue up two events. Be paranoid */ 2678 #define GEN8_IRQ_INIT_NDX(type, which) do { \ 2679 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 2680 POSTING_READ(GEN8_##type##_IMR(which)); \ 2681 I915_WRITE(GEN8_##type##_IER(which), 0); \ 2682 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2683 POSTING_READ(GEN8_##type##_IIR(which)); \ 2684 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 2685 } while (0) 2686 2687 #define GEN8_IRQ_INIT(type) do { \ 2688 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 2689 POSTING_READ(GEN8_##type##_IMR); \ 2690 I915_WRITE(GEN8_##type##_IER, 0); \ 2691 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2692 POSTING_READ(GEN8_##type##_IIR); \ 2693 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 2694 } while (0) 2695 2696 GEN8_IRQ_INIT_NDX(GT, 0); 2697 GEN8_IRQ_INIT_NDX(GT, 1); 2698 GEN8_IRQ_INIT_NDX(GT, 2); 2699 GEN8_IRQ_INIT_NDX(GT, 3); 2700 2701 for_each_pipe(pipe) { 2702 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe); 2703 } 2704 2705 GEN8_IRQ_INIT(DE_PORT); 2706 GEN8_IRQ_INIT(DE_MISC); 2707 GEN8_IRQ_INIT(PCU); 2708 #undef GEN8_IRQ_INIT 2709 #undef GEN8_IRQ_INIT_NDX 2710 2711 POSTING_READ(GEN8_PCU_IIR); 2712 2713 ibx_irq_preinstall(dev); 2714 } 2715 2716 static void ibx_hpd_irq_setup(struct drm_device *dev) 2717 { 2718 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2719 struct drm_mode_config *mode_config = &dev->mode_config; 2720 struct intel_encoder *intel_encoder; 2721 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 2722 2723 if (HAS_PCH_IBX(dev)) { 2724 hotplug_irqs = SDE_HOTPLUG_MASK; 2725 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2726 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2727 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 2728 } else { 2729 hotplug_irqs = 
SDE_HOTPLUG_MASK_CPT; 2730 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 2731 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 2732 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 2733 } 2734 2735 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 2736 2737 /* 2738 * Enable digital hotplug on the PCH, and configure the DP short pulse 2739 * duration to 2ms (which is the minimum in the Display Port spec) 2740 * 2741 * This register is the same on all known PCH chips. 2742 */ 2743 hotplug = I915_READ(PCH_PORT_HOTPLUG); 2744 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 2745 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 2746 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 2747 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 2748 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 2749 } 2750 2751 static void ibx_irq_postinstall(struct drm_device *dev) 2752 { 2753 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2754 u32 mask; 2755 2756 if (HAS_PCH_NOP(dev)) 2757 return; 2758 2759 if (HAS_PCH_IBX(dev)) { 2760 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 2761 } else { 2762 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 2763 2764 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 2765 } 2766 2767 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 2768 I915_WRITE(SDEIMR, ~mask); 2769 } 2770 2771 static void gen5_gt_irq_postinstall(struct drm_device *dev) 2772 { 2773 struct drm_i915_private *dev_priv = dev->dev_private; 2774 u32 pm_irqs, gt_irqs; 2775 2776 pm_irqs = gt_irqs = 0; 2777 2778 dev_priv->gt_irq_mask = ~0; 2779 if (HAS_L3_DPF(dev)) { 2780 /* L3 parity interrupt is always unmasked. 
*/ 2781 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 2782 gt_irqs |= GT_PARITY_ERROR(dev); 2783 } 2784 2785 gt_irqs |= GT_RENDER_USER_INTERRUPT; 2786 if (IS_GEN5(dev)) { 2787 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 2788 ILK_BSD_USER_INTERRUPT; 2789 } else { 2790 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 2791 } 2792 2793 I915_WRITE(GTIIR, I915_READ(GTIIR)); 2794 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 2795 I915_WRITE(GTIER, gt_irqs); 2796 POSTING_READ(GTIER); 2797 2798 if (INTEL_INFO(dev)->gen >= 6) { 2799 pm_irqs |= GEN6_PM_RPS_EVENTS; 2800 2801 if (HAS_VEBOX(dev)) 2802 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 2803 2804 dev_priv->pm_irq_mask = 0xffffffff; 2805 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); 2806 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); 2807 I915_WRITE(GEN6_PMIER, pm_irqs); 2808 POSTING_READ(GEN6_PMIER); 2809 } 2810 } 2811 2812 static int ironlake_irq_postinstall(struct drm_device *dev) 2813 { 2814 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2815 u32 display_mask, extra_mask; 2816 2817 if (INTEL_INFO(dev)->gen >= 7) { 2818 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 2819 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 2820 DE_PLANEB_FLIP_DONE_IVB | 2821 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 2822 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 2823 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 2824 2825 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 2826 } else { 2827 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 2828 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 2829 DE_AUX_CHANNEL_A | 2830 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 2831 DE_POISON); 2832 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 2833 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 2834 } 2835 2836 dev_priv->irq_mask = ~display_mask; 2837 2838 /* should always can generate irq */ 2839 I915_WRITE(DEIIR, I915_READ(DEIIR)); 2840 I915_WRITE(DEIMR, dev_priv->irq_mask); 2841 
I915_WRITE(DEIER, display_mask | extra_mask); 2842 POSTING_READ(DEIER); 2843 2844 gen5_gt_irq_postinstall(dev); 2845 2846 ibx_irq_postinstall(dev); 2847 2848 if (IS_IRONLAKE_M(dev)) { 2849 /* Enable PCU event interrupts 2850 * 2851 * spinlocking not required here for correctness since interrupt 2852 * setup is guaranteed to run in single-threaded context. But we 2853 * need it to make the assert_spin_locked happy. */ 2854 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2855 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 2856 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2857 } 2858 2859 return 0; 2860 } 2861 2862 static int valleyview_irq_postinstall(struct drm_device *dev) 2863 { 2864 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 2865 u32 enable_mask; 2866 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV | 2867 PIPE_CRC_DONE_ENABLE; 2868 2869 enable_mask = I915_DISPLAY_PORT_INTERRUPT; 2870 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 2871 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2872 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 2873 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2874 2875 /* 2876 *Leave vblank interrupts masked initially. enable/disable will 2877 * toggle them based on usage. 2878 */ 2879 dev_priv->irq_mask = (~enable_mask) | 2880 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | 2881 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 2882 2883 I915_WRITE(PORT_HOTPLUG_EN, 0); 2884 POSTING_READ(PORT_HOTPLUG_EN); 2885 2886 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 2887 I915_WRITE(VLV_IER, enable_mask); 2888 I915_WRITE(VLV_IIR, 0xffffffff); 2889 I915_WRITE(PIPESTAT(0), 0xffff); 2890 I915_WRITE(PIPESTAT(1), 0xffff); 2891 POSTING_READ(VLV_IER); 2892 2893 /* Interrupt setup is already guaranteed to be single-threaded, this is 2894 * just to make the assert_spin_locked check happy. 
*/ 2895 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2896 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_enable); 2897 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 2898 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_enable); 2899 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2900 2901 I915_WRITE(VLV_IIR, 0xffffffff); 2902 I915_WRITE(VLV_IIR, 0xffffffff); 2903 2904 gen5_gt_irq_postinstall(dev); 2905 2906 /* ack & enable invalid PTE error interrupts */ 2907 #if 0 /* FIXME: add support to irq handler for checking these bits */ 2908 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 2909 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 2910 #endif 2911 2912 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 2913 2914 return 0; 2915 } 2916 2917 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 2918 { 2919 int i; 2920 2921 /* These are interrupts we'll toggle with the ring mask register */ 2922 uint32_t gt_interrupts[] = { 2923 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 2924 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 2925 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 2926 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 2927 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 2928 0, 2929 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 2930 }; 2931 2932 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) { 2933 u32 tmp = I915_READ(GEN8_GT_IIR(i)); 2934 if (tmp) 2935 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2936 i, tmp); 2937 I915_WRITE(GEN8_GT_IMR(i), ~gt_interrupts[i]); 2938 I915_WRITE(GEN8_GT_IER(i), gt_interrupts[i]); 2939 } 2940 POSTING_READ(GEN8_GT_IER(0)); 2941 } 2942 2943 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 2944 { 2945 struct drm_device *dev = dev_priv->dev; 2946 uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE | 2947 GEN8_PIPE_CDCLK_CRC_DONE | 2948 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 2949 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 2950 GEN8_PIPE_FIFO_UNDERRUN; 
2951 int pipe; 2952 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 2953 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 2954 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 2955 2956 for_each_pipe(pipe) { 2957 u32 tmp = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2958 if (tmp) 2959 DRM_ERROR("Interrupt (%d) should have been masked in pre-install 0x%08x\n", 2960 pipe, tmp); 2961 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2962 I915_WRITE(GEN8_DE_PIPE_IER(pipe), de_pipe_enables); 2963 } 2964 POSTING_READ(GEN8_DE_PIPE_ISR(0)); 2965 2966 I915_WRITE(GEN8_DE_PORT_IMR, ~GEN8_AUX_CHANNEL_A); 2967 I915_WRITE(GEN8_DE_PORT_IER, GEN8_AUX_CHANNEL_A); 2968 POSTING_READ(GEN8_DE_PORT_IER); 2969 } 2970 2971 static int gen8_irq_postinstall(struct drm_device *dev) 2972 { 2973 struct drm_i915_private *dev_priv = dev->dev_private; 2974 2975 gen8_gt_irq_postinstall(dev_priv); 2976 gen8_de_irq_postinstall(dev_priv); 2977 2978 ibx_irq_postinstall(dev); 2979 2980 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 2981 POSTING_READ(GEN8_MASTER_IRQ); 2982 2983 return 0; 2984 } 2985 2986 static void gen8_irq_uninstall(struct drm_device *dev) 2987 { 2988 struct drm_i915_private *dev_priv = dev->dev_private; 2989 int pipe; 2990 2991 if (!dev_priv) 2992 return; 2993 2994 atomic_set(&dev_priv->irq_received, 0); 2995 2996 I915_WRITE(GEN8_MASTER_IRQ, 0); 2997 2998 #define GEN8_IRQ_FINI_NDX(type, which) do { \ 2999 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3000 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3001 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3002 } while (0) 3003 3004 #define GEN8_IRQ_FINI(type) do { \ 3005 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3006 I915_WRITE(GEN8_##type##_IER, 0); \ 3007 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3008 } while (0) 3009 3010 GEN8_IRQ_FINI_NDX(GT, 0); 3011 GEN8_IRQ_FINI_NDX(GT, 1); 3012 GEN8_IRQ_FINI_NDX(GT, 2); 3013 GEN8_IRQ_FINI_NDX(GT, 3); 3014 3015 for_each_pipe(pipe) { 3016 GEN8_IRQ_FINI_NDX(DE_PIPE, pipe); 
3017 } 3018 3019 GEN8_IRQ_FINI(DE_PORT); 3020 GEN8_IRQ_FINI(DE_MISC); 3021 GEN8_IRQ_FINI(PCU); 3022 #undef GEN8_IRQ_FINI 3023 #undef GEN8_IRQ_FINI_NDX 3024 3025 POSTING_READ(GEN8_PCU_IIR); 3026 } 3027 3028 static void valleyview_irq_uninstall(struct drm_device *dev) 3029 { 3030 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3031 int pipe; 3032 3033 if (!dev_priv) 3034 return; 3035 3036 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3037 3038 for_each_pipe(pipe) 3039 I915_WRITE(PIPESTAT(pipe), 0xffff); 3040 3041 I915_WRITE(HWSTAM, 0xffffffff); 3042 I915_WRITE(PORT_HOTPLUG_EN, 0); 3043 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3044 for_each_pipe(pipe) 3045 I915_WRITE(PIPESTAT(pipe), 0xffff); 3046 I915_WRITE(VLV_IIR, 0xffffffff); 3047 I915_WRITE(VLV_IMR, 0xffffffff); 3048 I915_WRITE(VLV_IER, 0x0); 3049 POSTING_READ(VLV_IER); 3050 } 3051 3052 static void ironlake_irq_uninstall(struct drm_device *dev) 3053 { 3054 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3055 3056 if (!dev_priv) 3057 return; 3058 3059 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3060 3061 I915_WRITE(HWSTAM, 0xffffffff); 3062 3063 I915_WRITE(DEIMR, 0xffffffff); 3064 I915_WRITE(DEIER, 0x0); 3065 I915_WRITE(DEIIR, I915_READ(DEIIR)); 3066 if (IS_GEN7(dev)) 3067 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); 3068 3069 I915_WRITE(GTIMR, 0xffffffff); 3070 I915_WRITE(GTIER, 0x0); 3071 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3072 3073 if (HAS_PCH_NOP(dev)) 3074 return; 3075 3076 I915_WRITE(SDEIMR, 0xffffffff); 3077 I915_WRITE(SDEIER, 0x0); 3078 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 3079 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3080 I915_WRITE(SERR_INT, I915_READ(SERR_INT)); 3081 } 3082 3083 static void i8xx_irq_preinstall(struct drm_device * dev) 3084 { 3085 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3086 int pipe; 3087 3088 atomic_set(&dev_priv->irq_received, 0); 3089 3090 for_each_pipe(pipe) 3091 
I915_WRITE(PIPESTAT(pipe), 0); 3092 I915_WRITE16(IMR, 0xffff); 3093 I915_WRITE16(IER, 0x0); 3094 POSTING_READ16(IER); 3095 } 3096 3097 static int i8xx_irq_postinstall(struct drm_device *dev) 3098 { 3099 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3100 3101 I915_WRITE16(EMR, 3102 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3103 3104 /* Unmask the interrupts that we always want on. */ 3105 dev_priv->irq_mask = 3106 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3107 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3108 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3109 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3110 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3111 I915_WRITE16(IMR, dev_priv->irq_mask); 3112 3113 I915_WRITE16(IER, 3114 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3115 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3116 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3117 I915_USER_INTERRUPT); 3118 POSTING_READ16(IER); 3119 3120 /* Interrupt setup is already guaranteed to be single-threaded, this is 3121 * just to make the assert_spin_locked check happy. */ 3122 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3123 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3124 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3125 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3126 3127 return 0; 3128 } 3129 3130 /* 3131 * Returns true when a page flip has completed. 3132 */ 3133 static bool i8xx_handle_vblank(struct drm_device *dev, 3134 int plane, int pipe, u32 iir) 3135 { 3136 drm_i915_private_t *dev_priv = dev->dev_private; 3137 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3138 3139 if (!drm_handle_vblank(dev, pipe)) 3140 return false; 3141 3142 if ((iir & flip_pending) == 0) 3143 return false; 3144 3145 intel_prepare_page_flip(dev, plane); 3146 3147 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3148 * to '0' on the following vblank, i.e. 
IIR has the Pendingflip 3149 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3150 * the flip is completed (no longer pending). Since this doesn't raise 3151 * an interrupt per se, we watch for the change at vblank. 3152 */ 3153 if (I915_READ16(ISR) & flip_pending) 3154 return false; 3155 3156 intel_finish_page_flip(dev, pipe); 3157 3158 return true; 3159 } 3160 3161 static irqreturn_t i8xx_irq_handler(void *arg) 3162 { 3163 struct drm_device *dev = (struct drm_device *) arg; 3164 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3165 u16 iir, new_iir; 3166 u32 pipe_stats[2]; 3167 int pipe; 3168 u16 flip_mask = 3169 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3170 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3171 3172 atomic_inc(&dev_priv->irq_received); 3173 3174 iir = I915_READ16(IIR); 3175 if (iir == 0) 3176 return; 3177 3178 while (iir & ~flip_mask) { 3179 /* Can't rely on pipestat interrupt bit in iir as it might 3180 * have been cleared after the pipestat interrupt was received. 3181 * It doesn't set the bit in iir again, but it still produces 3182 * interrupts (for non-MSI). 
3183 */ 3184 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3185 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3186 i915_handle_error(dev, false); 3187 3188 for_each_pipe(pipe) { 3189 int reg = PIPESTAT(pipe); 3190 pipe_stats[pipe] = I915_READ(reg); 3191 3192 /* 3193 * Clear the PIPE*STAT regs before the IIR 3194 */ 3195 if (pipe_stats[pipe] & 0x8000ffff) { 3196 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3197 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3198 pipe_name(pipe)); 3199 I915_WRITE(reg, pipe_stats[pipe]); 3200 } 3201 } 3202 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3203 3204 I915_WRITE16(IIR, iir & ~flip_mask); 3205 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3206 3207 i915_update_dri1_breadcrumb(dev); 3208 3209 if (iir & I915_USER_INTERRUPT) 3210 notify_ring(dev, &dev_priv->ring[RCS]); 3211 3212 for_each_pipe(pipe) { 3213 int plane = pipe; 3214 if (HAS_FBC(dev)) 3215 plane = !plane; 3216 3217 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3218 i8xx_handle_vblank(dev, plane, pipe, iir)) 3219 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3220 3221 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3222 i9xx_pipe_crc_irq_handler(dev, pipe); 3223 } 3224 3225 iir = new_iir; 3226 } 3227 3228 } 3229 3230 static void i8xx_irq_uninstall(struct drm_device * dev) 3231 { 3232 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3233 int pipe; 3234 3235 for_each_pipe(pipe) { 3236 /* Clear enable bits; then clear status bits */ 3237 I915_WRITE(PIPESTAT(pipe), 0); 3238 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3239 } 3240 I915_WRITE16(IMR, 0xffff); 3241 I915_WRITE16(IER, 0x0); 3242 I915_WRITE16(IIR, I915_READ16(IIR)); 3243 } 3244 3245 static void i915_irq_preinstall(struct drm_device * dev) 3246 { 3247 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3248 int pipe; 3249 3250 atomic_set(&dev_priv->irq_received, 0); 3251 3252 if (I915_HAS_HOTPLUG(dev)) { 3253 I915_WRITE(PORT_HOTPLUG_EN, 
0); 3254 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3255 } 3256 3257 I915_WRITE16(HWSTAM, 0xeffe); 3258 for_each_pipe(pipe) 3259 I915_WRITE(PIPESTAT(pipe), 0); 3260 I915_WRITE(IMR, 0xffffffff); 3261 I915_WRITE(IER, 0x0); 3262 POSTING_READ(IER); 3263 } 3264 3265 static int i915_irq_postinstall(struct drm_device *dev) 3266 { 3267 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3268 u32 enable_mask; 3269 3270 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3271 3272 /* Unmask the interrupts that we always want on. */ 3273 dev_priv->irq_mask = 3274 ~(I915_ASLE_INTERRUPT | 3275 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3276 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3277 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3278 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3279 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3280 3281 enable_mask = 3282 I915_ASLE_INTERRUPT | 3283 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3284 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3285 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3286 I915_USER_INTERRUPT; 3287 3288 if (I915_HAS_HOTPLUG(dev)) { 3289 I915_WRITE(PORT_HOTPLUG_EN, 0); 3290 POSTING_READ(PORT_HOTPLUG_EN); 3291 3292 /* Enable in IER... */ 3293 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 3294 /* and unmask in IMR */ 3295 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 3296 } 3297 3298 I915_WRITE(IMR, dev_priv->irq_mask); 3299 I915_WRITE(IER, enable_mask); 3300 POSTING_READ(IER); 3301 3302 i915_enable_asle_pipestat(dev); 3303 3304 /* Interrupt setup is already guaranteed to be single-threaded, this is 3305 * just to make the assert_spin_locked check happy. */ 3306 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3307 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3308 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3309 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3310 3311 return 0; 3312 } 3313 3314 /* 3315 * Returns true when a page flip has completed. 
3316 */ 3317 static bool i915_handle_vblank(struct drm_device *dev, 3318 int plane, int pipe, u32 iir) 3319 { 3320 drm_i915_private_t *dev_priv = dev->dev_private; 3321 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3322 3323 if (!drm_handle_vblank(dev, pipe)) 3324 return false; 3325 3326 if ((iir & flip_pending) == 0) 3327 return false; 3328 3329 intel_prepare_page_flip(dev, plane); 3330 3331 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3332 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3333 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3334 * the flip is completed (no longer pending). Since this doesn't raise 3335 * an interrupt per se, we watch for the change at vblank. 3336 */ 3337 if (I915_READ(ISR) & flip_pending) 3338 return false; 3339 3340 intel_finish_page_flip(dev, pipe); 3341 3342 return true; 3343 } 3344 3345 static irqreturn_t i915_irq_handler(void *arg) 3346 { 3347 struct drm_device *dev = (struct drm_device *) arg; 3348 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3349 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3350 u32 flip_mask = 3351 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3352 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3353 int pipe; 3354 3355 atomic_inc(&dev_priv->irq_received); 3356 3357 iir = I915_READ(IIR); 3358 do { 3359 bool irq_received = (iir & ~flip_mask) != 0; 3360 bool blc_event = false; 3361 3362 /* Can't rely on pipestat interrupt bit in iir as it might 3363 * have been cleared after the pipestat interrupt was received. 3364 * It doesn't set the bit in iir again, but it still produces 3365 * interrupts (for non-MSI). 
3366 */ 3367 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3368 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3369 i915_handle_error(dev, false); 3370 3371 for_each_pipe(pipe) { 3372 int reg = PIPESTAT(pipe); 3373 pipe_stats[pipe] = I915_READ(reg); 3374 3375 /* Clear the PIPE*STAT regs before the IIR */ 3376 if (pipe_stats[pipe] & 0x8000ffff) { 3377 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3378 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3379 pipe_name(pipe)); 3380 I915_WRITE(reg, pipe_stats[pipe]); 3381 irq_received = true; 3382 } 3383 } 3384 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3385 3386 if (!irq_received) 3387 break; 3388 3389 /* Consume port. Then clear IIR or we'll miss events */ 3390 if ((I915_HAS_HOTPLUG(dev)) && 3391 (iir & I915_DISPLAY_PORT_INTERRUPT)) { 3392 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3393 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 3394 3395 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3396 hotplug_status); 3397 3398 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); 3399 3400 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3401 POSTING_READ(PORT_HOTPLUG_STAT); 3402 } 3403 3404 I915_WRITE(IIR, iir & ~flip_mask); 3405 new_iir = I915_READ(IIR); /* Flush posted writes */ 3406 3407 if (iir & I915_USER_INTERRUPT) 3408 notify_ring(dev, &dev_priv->ring[RCS]); 3409 3410 for_each_pipe(pipe) { 3411 int plane = pipe; 3412 if (HAS_FBC(dev)) 3413 plane = !plane; 3414 3415 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 3416 i915_handle_vblank(dev, plane, pipe, iir)) 3417 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 3418 3419 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3420 blc_event = true; 3421 3422 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3423 i9xx_pipe_crc_irq_handler(dev, pipe); 3424 } 3425 3426 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3427 intel_opregion_asle_intr(dev); 3428 3429 /* With MSI, interrupts are only generated when iir 3430 * 
transitions from zero to nonzero. If another bit got 3431 * set while we were handling the existing iir bits, then 3432 * we would never get another interrupt. 3433 * 3434 * This is fine on non-MSI as well, as if we hit this path 3435 * we avoid exiting the interrupt handler only to generate 3436 * another one. 3437 * 3438 * Note that for MSI this could cause a stray interrupt report 3439 * if an interrupt landed in the time between writing IIR and 3440 * the posting read. This should be rare enough to never 3441 * trigger the 99% of 100,000 interrupts test for disabling 3442 * stray interrupts. 3443 */ 3444 iir = new_iir; 3445 } while (iir & ~flip_mask); 3446 3447 i915_update_dri1_breadcrumb(dev); 3448 3449 } 3450 3451 static void i915_irq_uninstall(struct drm_device * dev) 3452 { 3453 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3454 int pipe; 3455 3456 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3457 3458 if (I915_HAS_HOTPLUG(dev)) { 3459 I915_WRITE(PORT_HOTPLUG_EN, 0); 3460 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3461 } 3462 3463 I915_WRITE16(HWSTAM, 0xffff); 3464 for_each_pipe(pipe) { 3465 /* Clear enable bits; then clear status bits */ 3466 I915_WRITE(PIPESTAT(pipe), 0); 3467 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); 3468 } 3469 I915_WRITE(IMR, 0xffffffff); 3470 I915_WRITE(IER, 0x0); 3471 3472 I915_WRITE(IIR, I915_READ(IIR)); 3473 } 3474 3475 static void i965_irq_preinstall(struct drm_device * dev) 3476 { 3477 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3478 int pipe; 3479 3480 atomic_set(&dev_priv->irq_received, 0); 3481 3482 I915_WRITE(PORT_HOTPLUG_EN, 0); 3483 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3484 3485 I915_WRITE(HWSTAM, 0xeffe); 3486 for_each_pipe(pipe) 3487 I915_WRITE(PIPESTAT(pipe), 0); 3488 I915_WRITE(IMR, 0xffffffff); 3489 I915_WRITE(IER, 0x0); 3490 POSTING_READ(IER); 3491 } 3492 3493 static int i965_irq_postinstall(struct 
drm_device *dev) 3494 { 3495 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3496 u32 enable_mask; 3497 u32 error_mask; 3498 3499 /* Unmask the interrupts that we always want on. */ 3500 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 3501 I915_DISPLAY_PORT_INTERRUPT | 3502 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3503 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3504 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3505 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3506 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3507 3508 enable_mask = ~dev_priv->irq_mask; 3509 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3510 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 3511 enable_mask |= I915_USER_INTERRUPT; 3512 3513 if (IS_G4X(dev)) 3514 enable_mask |= I915_BSD_USER_INTERRUPT; 3515 3516 /* Interrupt setup is already guaranteed to be single-threaded, this is 3517 * just to make the assert_spin_locked check happy. */ 3518 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3519 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_EVENT_ENABLE); 3520 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_ENABLE); 3521 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_ENABLE); 3522 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3523 3524 /* 3525 * Enable some error detection, note the instruction error mask 3526 * bit is reserved, so we leave it masked. 
3527 */ 3528 if (IS_G4X(dev)) { 3529 error_mask = ~(GM45_ERROR_PAGE_TABLE | 3530 GM45_ERROR_MEM_PRIV | 3531 GM45_ERROR_CP_PRIV | 3532 I915_ERROR_MEMORY_REFRESH); 3533 } else { 3534 error_mask = ~(I915_ERROR_PAGE_TABLE | 3535 I915_ERROR_MEMORY_REFRESH); 3536 } 3537 I915_WRITE(EMR, error_mask); 3538 3539 I915_WRITE(IMR, dev_priv->irq_mask); 3540 I915_WRITE(IER, enable_mask); 3541 POSTING_READ(IER); 3542 3543 I915_WRITE(PORT_HOTPLUG_EN, 0); 3544 POSTING_READ(PORT_HOTPLUG_EN); 3545 3546 i915_enable_asle_pipestat(dev); 3547 3548 return 0; 3549 } 3550 3551 static void i915_hpd_irq_setup(struct drm_device *dev) 3552 { 3553 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3554 struct drm_mode_config *mode_config = &dev->mode_config; 3555 struct intel_encoder *intel_encoder; 3556 u32 hotplug_en; 3557 3558 assert_spin_locked(&dev_priv->irq_lock); 3559 3560 if (I915_HAS_HOTPLUG(dev)) { 3561 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 3562 hotplug_en &= ~HOTPLUG_INT_EN_MASK; 3563 /* Note HDMI and DP share hotplug bits */ 3564 /* enable bits are the same for all generations */ 3565 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3566 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3567 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; 3568 /* Programming the CRT detection parameters tends 3569 to generate a spurious hotplug event about three 3570 seconds later. So just do it once. 
3571 */ 3572 if (IS_G4X(dev)) 3573 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 3574 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; 3575 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 3576 3577 /* Ignore TV since it's buggy */ 3578 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 3579 } 3580 } 3581 3582 static irqreturn_t i965_irq_handler(void *arg) 3583 { 3584 struct drm_device *dev = (struct drm_device *) arg; 3585 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3586 u32 iir, new_iir; 3587 u32 pipe_stats[I915_MAX_PIPES]; 3588 int irq_received; 3589 int pipe; 3590 u32 flip_mask = 3591 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3592 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3593 3594 atomic_inc(&dev_priv->irq_received); 3595 3596 iir = I915_READ(IIR); 3597 3598 for (;;) { 3599 bool blc_event = false; 3600 3601 irq_received = (iir & ~flip_mask) != 0; 3602 3603 /* Can't rely on pipestat interrupt bit in iir as it might 3604 * have been cleared after the pipestat interrupt was received. 3605 * It doesn't set the bit in iir again, but it still produces 3606 * interrupts (for non-MSI). 3607 */ 3608 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3609 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3610 i915_handle_error(dev, false); 3611 3612 for_each_pipe(pipe) { 3613 int reg = PIPESTAT(pipe); 3614 pipe_stats[pipe] = I915_READ(reg); 3615 3616 /* 3617 * Clear the PIPE*STAT regs before the IIR 3618 */ 3619 if (pipe_stats[pipe] & 0x8000ffff) { 3620 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 3621 DRM_DEBUG_DRIVER("pipe %c underrun\n", 3622 pipe_name(pipe)); 3623 I915_WRITE(reg, pipe_stats[pipe]); 3624 irq_received = 1; 3625 } 3626 } 3627 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3628 3629 if (!irq_received) 3630 break; 3631 3632 3633 /* Consume port. 
Then clear IIR or we'll miss events */ 3634 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 3635 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); 3636 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? 3637 HOTPLUG_INT_STATUS_G4X : 3638 HOTPLUG_INT_STATUS_I915); 3639 3640 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", 3641 hotplug_status); 3642 3643 intel_hpd_irq_handler(dev, hotplug_trigger, 3644 IS_G4X(dev) ? hpd_status_g4x : hpd_status_i915); 3645 3646 if (IS_G4X(dev) && 3647 (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)) 3648 dp_aux_irq_handler(dev); 3649 3650 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); 3651 I915_READ(PORT_HOTPLUG_STAT); 3652 } 3653 3654 I915_WRITE(IIR, iir & ~flip_mask); 3655 new_iir = I915_READ(IIR); /* Flush posted writes */ 3656 3657 if (iir & I915_USER_INTERRUPT) 3658 notify_ring(dev, &dev_priv->ring[RCS]); 3659 if (iir & I915_BSD_USER_INTERRUPT) 3660 notify_ring(dev, &dev_priv->ring[VCS]); 3661 3662 for_each_pipe(pipe) { 3663 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 3664 i915_handle_vblank(dev, pipe, pipe, iir)) 3665 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 3666 3667 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 3668 blc_event = true; 3669 3670 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3671 i9xx_pipe_crc_irq_handler(dev, pipe); 3672 } 3673 3674 3675 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3676 intel_opregion_asle_intr(dev); 3677 3678 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 3679 gmbus_irq_handler(dev); 3680 3681 /* With MSI, interrupts are only generated when iir 3682 * transitions from zero to nonzero. If another bit got 3683 * set while we were handling the existing iir bits, then 3684 * we would never get another interrupt. 3685 * 3686 * This is fine on non-MSI as well, as if we hit this path 3687 * we avoid exiting the interrupt handler only to generate 3688 * another one. 
3689 * 3690 * Note that for MSI this could cause a stray interrupt report 3691 * if an interrupt landed in the time between writing IIR and 3692 * the posting read. This should be rare enough to never 3693 * trigger the 99% of 100,000 interrupts test for disabling 3694 * stray interrupts. 3695 */ 3696 iir = new_iir; 3697 } 3698 3699 i915_update_dri1_breadcrumb(dev); 3700 3701 } 3702 3703 static void i965_irq_uninstall(struct drm_device * dev) 3704 { 3705 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 3706 int pipe; 3707 3708 if (!dev_priv) 3709 return; 3710 3711 del_timer_sync(&dev_priv->hotplug_reenable_timer); 3712 3713 I915_WRITE(PORT_HOTPLUG_EN, 0); 3714 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3715 3716 I915_WRITE(HWSTAM, 0xffffffff); 3717 for_each_pipe(pipe) 3718 I915_WRITE(PIPESTAT(pipe), 0); 3719 I915_WRITE(IMR, 0xffffffff); 3720 I915_WRITE(IER, 0x0); 3721 3722 for_each_pipe(pipe) 3723 I915_WRITE(PIPESTAT(pipe), 3724 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 3725 I915_WRITE(IIR, I915_READ(IIR)); 3726 } 3727 3728 static void i915_reenable_hotplug_timer_func(unsigned long data) 3729 { 3730 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; 3731 struct drm_device *dev = dev_priv->dev; 3732 struct drm_mode_config *mode_config = &dev->mode_config; 3733 int i; 3734 3735 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3736 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 3737 struct drm_connector *connector; 3738 3739 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) 3740 continue; 3741 3742 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3743 3744 list_for_each_entry(connector, &mode_config->connector_list, head) { 3745 struct intel_connector *intel_connector = to_intel_connector(connector); 3746 3747 if (intel_connector->encoder->hpd_pin == i) { 3748 if (connector->polled != intel_connector->polled) 3749 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", 3750 drm_get_connector_name(connector)); 3751 
connector->polled = intel_connector->polled; 3752 if (!connector->polled) 3753 connector->polled = DRM_CONNECTOR_POLL_HPD; 3754 } 3755 } 3756 } 3757 if (dev_priv->display.hpd_irq_setup) 3758 dev_priv->display.hpd_irq_setup(dev); 3759 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3760 } 3761 3762 void intel_irq_init(struct drm_device *dev) 3763 { 3764 struct drm_i915_private *dev_priv = dev->dev_private; 3765 3766 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 3767 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); 3768 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); 3769 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 3770 3771 setup_timer(&dev_priv->gpu_error.hangcheck_timer, 3772 i915_hangcheck_elapsed, 3773 (unsigned long) dev); 3774 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, 3775 (unsigned long) dev_priv); 3776 3777 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 3778 3779 if (IS_GEN2(dev)) { 3780 dev->max_vblank_count = 0; 3781 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 3782 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 3783 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 3784 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 3785 } else { 3786 dev->driver->get_vblank_counter = i915_get_vblank_counter; 3787 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 3788 } 3789 3790 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 3791 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 3792 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 3793 } 3794 3795 if (IS_VALLEYVIEW(dev)) { 3796 dev->driver->irq_handler = valleyview_irq_handler; 3797 dev->driver->irq_preinstall = valleyview_irq_preinstall; 3798 dev->driver->irq_postinstall = valleyview_irq_postinstall; 3799 dev->driver->irq_uninstall = valleyview_irq_uninstall; 3800 dev->driver->enable_vblank = 
valleyview_enable_vblank; 3801 dev->driver->disable_vblank = valleyview_disable_vblank; 3802 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3803 } else if (IS_GEN8(dev)) { 3804 dev->driver->irq_handler = gen8_irq_handler; 3805 dev->driver->irq_preinstall = gen8_irq_preinstall; 3806 dev->driver->irq_postinstall = gen8_irq_postinstall; 3807 dev->driver->irq_uninstall = gen8_irq_uninstall; 3808 dev->driver->enable_vblank = gen8_enable_vblank; 3809 dev->driver->disable_vblank = gen8_disable_vblank; 3810 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3811 } else if (HAS_PCH_SPLIT(dev)) { 3812 dev->driver->irq_handler = ironlake_irq_handler; 3813 dev->driver->irq_preinstall = ironlake_irq_preinstall; 3814 dev->driver->irq_postinstall = ironlake_irq_postinstall; 3815 dev->driver->irq_uninstall = ironlake_irq_uninstall; 3816 dev->driver->enable_vblank = ironlake_enable_vblank; 3817 dev->driver->disable_vblank = ironlake_disable_vblank; 3818 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 3819 } else { 3820 if (INTEL_INFO(dev)->gen == 2) { 3821 dev->driver->irq_preinstall = i8xx_irq_preinstall; 3822 dev->driver->irq_postinstall = i8xx_irq_postinstall; 3823 dev->driver->irq_handler = i8xx_irq_handler; 3824 dev->driver->irq_uninstall = i8xx_irq_uninstall; 3825 } else if (INTEL_INFO(dev)->gen == 3) { 3826 dev->driver->irq_preinstall = i915_irq_preinstall; 3827 dev->driver->irq_postinstall = i915_irq_postinstall; 3828 dev->driver->irq_uninstall = i915_irq_uninstall; 3829 dev->driver->irq_handler = i915_irq_handler; 3830 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3831 } else { 3832 dev->driver->irq_preinstall = i965_irq_preinstall; 3833 dev->driver->irq_postinstall = i965_irq_postinstall; 3834 dev->driver->irq_uninstall = i965_irq_uninstall; 3835 dev->driver->irq_handler = i965_irq_handler; 3836 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 3837 } 3838 dev->driver->enable_vblank = i915_enable_vblank; 3839 dev->driver->disable_vblank = 
i915_disable_vblank; 3840 } 3841 } 3842 3843 void intel_hpd_init(struct drm_device *dev) 3844 { 3845 struct drm_i915_private *dev_priv = dev->dev_private; 3846 struct drm_mode_config *mode_config = &dev->mode_config; 3847 struct drm_connector *connector; 3848 int i; 3849 3850 for (i = 1; i < HPD_NUM_PINS; i++) { 3851 dev_priv->hpd_stats[i].hpd_cnt = 0; 3852 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; 3853 } 3854 list_for_each_entry(connector, &mode_config->connector_list, head) { 3855 struct intel_connector *intel_connector = to_intel_connector(connector); 3856 connector->polled = intel_connector->polled; 3857 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) 3858 connector->polled = DRM_CONNECTOR_POLL_HPD; 3859 } 3860 3861 /* Interrupt setup is already guaranteed to be single-threaded, this is 3862 * just to make the assert_spin_locked checks happy. */ 3863 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3864 if (dev_priv->display.hpd_irq_setup) 3865 dev_priv->display.hpd_irq_setup(dev); 3866 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3867 } 3868 3869 /* Disable interrupts so we can allow Package C8+. 
*/ 3870 void hsw_pc8_disable_interrupts(struct drm_device *dev) 3871 { 3872 struct drm_i915_private *dev_priv = dev->dev_private; 3873 3874 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3875 3876 dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); 3877 dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); 3878 dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); 3879 dev_priv->pc8.regsave.gtier = I915_READ(GTIER); 3880 dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); 3881 3882 ironlake_disable_display_irq(dev_priv, 0xffffffff); 3883 ibx_disable_display_interrupt(dev_priv, 0xffffffff); 3884 ilk_disable_gt_irq(dev_priv, 0xffffffff); 3885 snb_disable_pm_irq(dev_priv, 0xffffffff); 3886 3887 dev_priv->pc8.irqs_disabled = true; 3888 3889 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3890 } 3891 3892 /* Restore interrupts so we can recover from Package C8+. */ 3893 void hsw_pc8_restore_interrupts(struct drm_device *dev) 3894 { 3895 struct drm_i915_private *dev_priv = dev->dev_private; 3896 uint32_t val; 3897 3898 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3899 3900 val = I915_READ(DEIMR); 3901 WARN(val != 0xffffffff, "DEIMR is 0x%08x\n", val); 3902 3903 val = I915_READ(SDEIMR); 3904 WARN(val != 0xffffffff, "SDEIMR is 0x%08x\n", val); 3905 3906 val = I915_READ(GTIMR); 3907 WARN(val != 0xffffffff, "GTIMR is 0x%08x\n", val); 3908 3909 val = I915_READ(GEN6_PMIMR); 3910 WARN(val != 0xffffffff, "GEN6_PMIMR is 0x%08x\n", val); 3911 3912 dev_priv->pc8.irqs_disabled = false; 3913 3914 ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); 3915 ibx_enable_display_interrupt(dev_priv, ~dev_priv->pc8.regsave.sdeimr); 3916 ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); 3917 snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); 3918 I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); 3919 3920 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3921 } 3922