/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
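/*
 * Each table above maps an hpd_pin enum value to the hardware trigger bit
 * for one interrupt scheme. intel_hpd_irq_handler() below walks such a
 * table using the pin number as the index, e.g. (illustrative only):
 *
 *	if (hpd_ibx[HPD_PORT_B] & hotplug_trigger)
 *		... port B hotplug fired on an IBX PCH ...
 */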
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)
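/*
 * The token pasting in these macros resolves to the per-block register
 * names. For example, GEN5_IRQ_RESET(GT) expands (roughly) to:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *
 * IIR is written twice because it can latch a second event behind the one
 * being cleared, hence the "be paranoid" comment above.
 */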
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}
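/*
 * The update helpers above all share the same two-mask convention:
 * @interrupt_mask selects which IMR bits are touched at all, and
 * @enabled_irq_mask says which of those end up enabled (IMR bits are
 * *mask* bits, so 0 = enabled). A worked example, for illustration:
 *
 *	interrupt_mask   = 0b0110   (update bits 1 and 2)
 *	enabled_irq_mask = 0b0010   (of those, enable only bit 1)
 *
 *	mask &= ~interrupt_mask;                     -> bits 1,2 cleared
 *	mask |= ~enabled_irq_mask & interrupt_mask;  -> bit 2 set again
 *
 * leaving bit 1 unmasked (enabled) and bit 2 masked, with all other bits
 * untouched.
 */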
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum i915_pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void bdw_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void bdw_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum i915_pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum i915_pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(dev_priv->pm.irqs_disabled))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum i915_pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool ret;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function lets us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
	return old;
}
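/*
 * Typical usage, for illustration: callers disable reporting around an
 * event that is expected to underrun and inspect the returned previous
 * state, e.g.
 *
 *	if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false))
 *		DRM_ERROR("PCH transcoder A FIFO underrun\n");
 *
 * which is exactly the pattern ibx_irq_handler() further below uses when
 * it sees SDE_TRANSA_FIFO_UNDER asserted.
 */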
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
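/*
 * PIPESTAT packs status bits in the low 16 bits and the matching enable
 * bits 16 positions higher, which is why "status_mask << 16" normally
 * yields the enable mask. For example (illustrative only), enabling the
 * vblank interrupt on pipe A under the irq lock looks like:
 *
 *	i915_enable_pipestat(dev_priv, PIPE_A,
 *			     PIPE_START_VBLANK_INTERRUPT_STATUS);
 *
 * The VLV helper above exists because a few bits break this 1:1 layout.
 */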
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
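/*
 * Worked example for the cook-up above (numbers invented for
 * illustration): with htotal = 2200, hsync_start = 2008 and
 * vblank_start = 1080, the pixel-count threshold becomes
 *
 *	vbl_start = 1080 * 2200 - (2200 - 2008) = 2375808
 *
 * so once the free-running pixel counter passes the hsync of the last
 * active line, (pixel >= vbl_start) contributes 1 and the returned value
 * rolls over to the next frame at the start of vblank, mimicking the
 * ctg+ hardware counter shown in the diagram.
 */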
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) \
	DRM_READ32(dev_priv__->mmio_map, reg__)

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum i915_pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. To keep the
		 * reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field.
		 * This matches how the scanline counter based position
		 * works since the scanline counter doesn't count the two
		 * half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}
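/*
 * A small worked example of the normalization above (mode numbers are
 * illustrative): with vbl_start = 1080, vbl_end = 1084 and vtotal = 1125,
 * a raw scanline of 1082 (inside vblank) becomes 1082 - 1084 = -2, while
 * a raw scanline of 100 (inside active) becomes 100 + (1125 - 1084) = 141.
 * Callers therefore see negative values counting up to 0 at the end of
 * vblank, and positive values counting up from there through the active
 * area.
 */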
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	int position;

	lockmgr(&dev_priv->uncore.lock, LK_EXCLUSIVE);
	position = __intel_get_crtc_scanline(crtc);
	lockmgr(&dev_priv->uncore.lock, LK_RELEASE);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (IS_BROADWELL(dev_priv->dev))
		bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
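/*
 * The up/down adjustment doubles while consecutive interrupts keep
 * pushing in the same direction. For illustration, a run of
 * GEN6_PM_RP_UP_THRESHOLD interrupts starting from last_adj = 0 steps
 * the frequency by +1, +2, +4, ... (subject to the softlimit clamp),
 * while a GEN6_PM_RP_DOWN_TIMEOUT resets the streak with adj = 0 and
 * drops straight to the efficient frequency (RPe) when above it.
 */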
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

#if 0
		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);
#endif

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
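/*
 * Note the handoff pattern: the interrupt handler masks GT_PARITY_ERROR
 * and records the affected slice in which_slice, then the work item
 * drains which_slice with ffs() and re-enables the interrupt when done.
 * E.g. (illustrative) which_slice = 0b10 makes ffs() return 2, so slice 1
 * is processed and its error status register sits at
 * GEN7_L3CDERRST1 + 1 * 0x200.
 */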
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	bdw_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
			I915_WRITE(GEN8_GT_IIR(0), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
			I915_WRITE(GEN8_GT_IIR(1), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			gen8_rps_irq_handler(dev_priv, tmp);
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
			I915_WRITE(GEN8_GT_IIR(3), tmp);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}
}
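/*
 * Each GEN8 GT IIR packs two engines' interrupt bits into one 32-bit
 * register at per-engine shifts. For illustration, if GEN8_VCS1_IRQ_SHIFT
 * is 0 and GEN8_VCS2_IRQ_SHIFT is 16 (the actual values live in
 * i915_reg.h), a GT IIR(1) value of 0x00010001 leaves
 * GT_RENDER_USER_INTERRUPT visible after either shift, so both VCS rings
 * get a notify_ring() call before the whole register is written back to
 * clear it.
 */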
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
			 hotplug_trigger);

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits themselves. So only WARN about
			 * unexpected interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}
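/*
 * Storm accounting in plain terms: interrupts on a pin within one
 * HPD_STORM_DETECT_PERIOD (1s) window increment hpd_cnt, and the first
 * interrupt outside the window resets the count. Once hpd_cnt has
 * climbed past HPD_STORM_THRESHOLD (5) inside a window, the next
 * interrupt marks the pin HPD_MARK_DISABLED, and i915_hotplug_work_func()
 * above then moves the affected connector over to polling.
 */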
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
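/*
 * The CRC entries form a power-of-two circular buffer: head is where the
 * interrupt writes, tail is where the debugfs reader consumes. For
 * illustration, with INTEL_PIPE_CRC_ENTRIES_NR = 128, head = 127 and
 * tail = 0, CIRC_SPACE() reports 0 free slots, so the entry is dropped
 * and "CRC buffer overflowing" is logged; otherwise head advances via
 * the (head + 1) & (NR - 1) mask and the waiting reader is woken.
 */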
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
		snb_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
{
	struct intel_crtc *crtc;

	if (!drm_handle_vblank(dev, pipe))
		return false;

	crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	wake_up(&crtc->vbl_wait);

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for_each_pipe(pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */
		mask = 0;
		if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
			mask |= PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	for_each_pipe(pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
			intel_pipe_handle_vblank(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
		    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
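/*
 * Writing the sampled value straight back works because the PIPESTAT
 * status bits are sticky write-one-to-clear: the enable bits in the high
 * half of pipe_stats[pipe] are rewritten unchanged while the latched
 * events in the low half are acked. This presumably has to happen before
 * the IIR write in the callers below so the pipe event summary bit is
 * not immediately re-latched by a still-pending status.
 */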
Then clear IIR or we'll miss events */ 1835 if (iir & I915_DISPLAY_PORT_INTERRUPT) 1836 i9xx_hpd_irq_handler(dev); 1837 1838 if (pm_iir) 1839 gen6_rps_irq_handler(dev_priv, pm_iir); 1840 1841 I915_WRITE(GTIIR, gt_iir); 1842 I915_WRITE(GEN6_PMIIR, pm_iir); 1843 I915_WRITE(VLV_IIR, iir); 1844 } 1845 1846 out: 1847 return; 1848 } 1849 1850 static irqreturn_t cherryview_irq_handler(void *arg) 1851 { 1852 struct drm_device *dev = arg; 1853 struct drm_i915_private *dev_priv = dev->dev_private; 1854 u32 master_ctl, iir; 1855 1856 for (;;) { 1857 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; 1858 iir = I915_READ(VLV_IIR); 1859 1860 if (master_ctl == 0 && iir == 0) 1861 break; 1862 1863 I915_WRITE(GEN8_MASTER_IRQ, 0); 1864 1865 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 1866 1867 valleyview_pipestat_irq_handler(dev, iir); 1868 1869 /* Consume port. Then clear IIR or we'll miss events */ 1870 i9xx_hpd_irq_handler(dev); 1871 1872 I915_WRITE(VLV_IIR, iir); 1873 1874 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 1875 POSTING_READ(GEN8_MASTER_IRQ); 1876 } 1877 } 1878 1879 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1880 { 1881 struct drm_i915_private *dev_priv = dev->dev_private; 1882 int pipe; 1883 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1884 1885 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1886 1887 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1888 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1889 SDE_AUDIO_POWER_SHIFT); 1890 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1891 port_name(port)); 1892 } 1893 1894 if (pch_iir & SDE_AUX_MASK) 1895 dp_aux_irq_handler(dev); 1896 1897 if (pch_iir & SDE_GMBUS) 1898 gmbus_irq_handler(dev); 1899 1900 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1901 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1902 1903 if (pch_iir & SDE_AUDIO_TRANS_MASK) 1904 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); 1905 1906 if (pch_iir & SDE_POISON) 1907 DRM_ERROR("PCH poison interrupt\n"); 1908 1909 if (pch_iir & SDE_FDI_MASK) 1910 for_each_pipe(pipe) 1911 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 1912 pipe_name(pipe), 1913 I915_READ(FDI_RX_IIR(pipe))); 1914 1915 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 1916 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 1917 1918 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 1919 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1920 1921 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1922 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1923 false)) 1924 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1925 1926 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1927 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1928 false)) 1929 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1930 } 1931 1932 static void ivb_err_int_handler(struct drm_device *dev) 1933 { 1934 struct drm_i915_private *dev_priv = dev->dev_private; 1935 u32 err_int = I915_READ(GEN7_ERR_INT); 1936 enum i915_pipe pipe; 1937 1938 if (err_int & ERR_INT_POISON) 1939 DRM_ERROR("Poison interrupt\n"); 1940 1941 for_each_pipe(pipe) { 1942 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 1943 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 1944 false)) 1945 DRM_ERROR("Pipe %c FIFO underrun\n", 1946 pipe_name(pipe)); 1947 } 1948 1949 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1950 if (IS_IVYBRIDGE(dev)) 1951 ivb_pipe_crc_irq_handler(dev, pipe); 1952 else 1953 hsw_pipe_crc_irq_handler(dev, pipe); 1954 } 1955 } 1956 1957 I915_WRITE(GEN7_ERR_INT, err_int); 1958 }
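/* SERR_INT latches south display (PCH transcoder) error conditions; like the other IIR-style registers it is acked below by writing back the bits that were read. */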
1959 1960 static void cpt_serr_int_handler(struct drm_device *dev) 1961 { 1962 struct drm_i915_private *dev_priv = dev->dev_private; 1963 u32 serr_int = I915_READ(SERR_INT); 1964 1965 if (serr_int & SERR_INT_POISON) 1966 DRM_ERROR("PCH poison interrupt\n"); 1967 1968 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 1969 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1970 false)) 1971 DRM_ERROR("PCH transcoder A FIFO underrun\n"); 1972 1973 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 1974 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1975 false)) 1976 DRM_ERROR("PCH transcoder B FIFO underrun\n"); 1977 1978 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 1979 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 1980 false)) 1981 DRM_ERROR("PCH transcoder C FIFO underrun\n"); 1982 1983 I915_WRITE(SERR_INT, serr_int); 1984 } 1985 1986 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 1987 { 1988 struct drm_i915_private *dev_priv = dev->dev_private; 1989 int pipe; 1990 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 1991 1992 intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 1993 1994 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 1995 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 1996 SDE_AUDIO_POWER_SHIFT_CPT); 1997 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", 1998 port_name(port)); 1999 } 2000 2001 if (pch_iir & SDE_AUX_MASK_CPT) 2002 dp_aux_irq_handler(dev); 2003 2004 if (pch_iir & SDE_GMBUS_CPT) 2005 gmbus_irq_handler(dev); 2006 2007 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2008 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2009 2010 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 2011 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 2012 2013 if (pch_iir & SDE_FDI_MASK_CPT) 2014 for_each_pipe(pipe) 2015 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 2016 pipe_name(pipe), 2017 I915_READ(FDI_RX_IIR(pipe))); 2018 2019 if (pch_iir & SDE_ERROR_CPT) 2020 cpt_serr_int_handler(dev); 2021 } 2022 2023 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2024 { 2025 struct drm_i915_private *dev_priv = dev->dev_private; 2026 enum i915_pipe pipe; 2027 2028 if (de_iir & DE_AUX_CHANNEL_A) 2029 dp_aux_irq_handler(dev); 2030 2031 if (de_iir & DE_GSE) 2032 intel_opregion_asle_intr(dev); 2033 2034 if (de_iir & DE_POISON) 2035 DRM_ERROR("Poison interrupt\n"); 2036 2037 for_each_pipe(pipe) { 2038 if (de_iir & DE_PIPE_VBLANK(pipe)) 2039 intel_pipe_handle_vblank(dev, pipe); 2040 2041 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2042 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2043 DRM_ERROR("Pipe %c FIFO underrun\n", 2044 pipe_name(pipe)); 2045 2046 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2047 i9xx_pipe_crc_irq_handler(dev, pipe); 2048 2049 /* plane/pipes map 1:1 on ilk+ */ 2050 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2051 intel_prepare_page_flip(dev, pipe); 2052 intel_finish_page_flip_plane(dev, pipe); 2053 } 2054 } 2055 2056 /* check event from PCH */ 2057 if (de_iir & DE_PCH_EVENT) { 2058 u32 pch_iir = I915_READ(SDEIIR); 2059 2060 if (HAS_PCH_CPT(dev)) 2061 cpt_irq_handler(dev, pch_iir); 2062 else 2063 ibx_irq_handler(dev, pch_iir); 2064 2065 /* should clear PCH hotplug event before clearing CPU irq */ 2066 I915_WRITE(SDEIIR, pch_iir); 2067 } 2068 2069 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2070 ironlake_rps_change_irq_handler(dev); 2071 } 2072 2073 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2074 { 2075 struct drm_i915_private *dev_priv = dev->dev_private; 2076 enum i915_pipe pipe;
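/* Note: IVB+ uses the _IVB display interrupt bit layout, which differs from the ILK bits handled above; error sources are collected in GEN7_ERR_INT and processed first. */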
2077 2078 if (de_iir & DE_ERR_INT_IVB) 2079 ivb_err_int_handler(dev); 2080 2081 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2082 dp_aux_irq_handler(dev); 2083 2084 if (de_iir & DE_GSE_IVB) 2085 intel_opregion_asle_intr(dev); 2086 2087 for_each_pipe(pipe) { 2088 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe))) 2089 intel_pipe_handle_vblank(dev, pipe); 2090 2091 /* plane/pipes map 1:1 on ilk+ */ 2092 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2093 intel_prepare_page_flip(dev, pipe); 2094 intel_finish_page_flip_plane(dev, pipe); 2095 } 2096 } 2097 2098 /* check event from PCH */ 2099 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2100 u32 pch_iir = I915_READ(SDEIIR); 2101 2102 cpt_irq_handler(dev, pch_iir); 2103 2104 /* clear PCH hotplug event before clearing CPU irq */ 2105 I915_WRITE(SDEIIR, pch_iir); 2106 } 2107 } 2108 2109 static irqreturn_t ironlake_irq_handler(void *arg) 2110 { 2111 struct drm_device *dev = arg; 2112 struct drm_i915_private *dev_priv = dev->dev_private; 2113 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2114 2115 /* We get interrupts on unclaimed registers, so check for this before we 2116 * do any I915_{READ,WRITE}. */ 2117 intel_uncore_check_errors(dev); 2118 2119 /* disable master interrupt before clearing iir */ 2120 de_ier = I915_READ(DEIER); 2121 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 2122 POSTING_READ(DEIER); 2123 2124 /* Disable south interrupts. We'll only write to SDEIIR once, so further 2125 * interrupts will be stored on its back queue, and then we'll be 2126 * able to process them after we restore SDEIER (as soon as we restore 2127 * it, we'll get an interrupt if SDEIIR still has something to process 2128 * due to its back queue). */ 2129 if (!HAS_PCH_NOP(dev)) { 2130 sde_ier = I915_READ(SDEIER); 2131 I915_WRITE(SDEIER, 0); 2132 POSTING_READ(SDEIER); 2133 } 2134 2135 gt_iir = I915_READ(GTIIR); 2136 if (gt_iir) { 2137 if (INTEL_INFO(dev)->gen >= 6) 2138 snb_gt_irq_handler(dev, dev_priv, gt_iir); 2139 else 2140 ilk_gt_irq_handler(dev, dev_priv, gt_iir); 2141 I915_WRITE(GTIIR, gt_iir); 2142 } 2143 2144 de_iir = I915_READ(DEIIR); 2145 if (de_iir) { 2146 if (INTEL_INFO(dev)->gen >= 7) 2147 ivb_display_irq_handler(dev, de_iir); 2148 else 2149 ilk_display_irq_handler(dev, de_iir); 2150 I915_WRITE(DEIIR, de_iir); 2151 } 2152 2153 if (INTEL_INFO(dev)->gen >= 6) { 2154 u32 pm_iir = I915_READ(GEN6_PMIIR); 2155 if (pm_iir) { 2156 gen6_rps_irq_handler(dev_priv, pm_iir); 2157 I915_WRITE(GEN6_PMIIR, pm_iir); 2158 } 2159 } 2160 2161 I915_WRITE(DEIER, de_ier); 2162 POSTING_READ(DEIER); 2163 if (!HAS_PCH_NOP(dev)) { 2164 I915_WRITE(SDEIER, sde_ier); 2165 POSTING_READ(SDEIER); 2166 } 2167 2168 } 2169 2170 static irqreturn_t gen8_irq_handler(void *arg) 2171 { 2172 struct drm_device *dev = arg; 2173 struct drm_i915_private *dev_priv = dev->dev_private; 2174 u32 master_ctl; 2175 uint32_t tmp = 0; 2176 enum i915_pipe pipe; 2177 2178 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2179 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2180 if (!master_ctl) 2181 return; 2182 2183 I915_WRITE(GEN8_MASTER_IRQ, 0); 2184 POSTING_READ(GEN8_MASTER_IRQ); 2185 2186 gen8_gt_irq_handler(dev, dev_priv, master_ctl); 2187 2188 if (master_ctl & GEN8_DE_MISC_IRQ) { 2189 tmp = I915_READ(GEN8_DE_MISC_IIR); 2190 if (tmp & GEN8_DE_MISC_GSE) 2191 intel_opregion_asle_intr(dev); 2192 else if (tmp) 2193 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2194 else 2195 DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); 2196 2197 if (tmp) { 2198 I915_WRITE(GEN8_DE_MISC_IIR, tmp); 2199 } 2200 } 2201 2202 if
(master_ctl & GEN8_DE_PORT_IRQ) { 2203 tmp = I915_READ(GEN8_DE_PORT_IIR); 2204 if (tmp & GEN8_AUX_CHANNEL_A) 2205 dp_aux_irq_handler(dev); 2206 else if (tmp) 2207 DRM_ERROR("Unexpected DE Port interrupt\n"); 2208 else 2209 DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); 2210 2211 if (tmp) { 2212 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2213 } 2214 } 2215 2216 for_each_pipe(pipe) { 2217 uint32_t pipe_iir; 2218 2219 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2220 continue; 2221 2222 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); 2223 if (pipe_iir & GEN8_PIPE_VBLANK) 2224 intel_pipe_handle_vblank(dev, pipe); 2225 2226 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2227 intel_prepare_page_flip(dev, pipe); 2228 intel_finish_page_flip_plane(dev, pipe); 2229 } 2230 2231 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2232 hsw_pipe_crc_irq_handler(dev, pipe); 2233 2234 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2235 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2236 false)) 2237 DRM_ERROR("Pipe %c FIFO underrun\n", 2238 pipe_name(pipe)); 2239 } 2240 2241 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2242 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2243 pipe_name(pipe), 2244 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2245 } 2246 2247 if (pipe_iir) { 2248 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2249 } else 2250 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2251 } 2252 2253 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { 2254 /* 2255 * FIXME(BDW): Assume for now that the new interrupt handling 2256 * scheme also closed the SDE interrupt handling race we've seen 2257 * on older pch-split platforms. But this needs testing. 2258 */ 2259 u32 pch_iir = I915_READ(SDEIIR); 2260 2261 cpt_irq_handler(dev, pch_iir); 2262 2263 if (pch_iir) { 2264 I915_WRITE(SDEIIR, pch_iir); 2265 } 2266 } 2267 2268 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2269 POSTING_READ(GEN8_MASTER_IRQ); 2270 2271 } 2272 2273 static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2274 bool reset_completed) 2275 { 2276 struct intel_engine_cs *ring; 2277 int i; 2278 2279 /* 2280 * Notify all waiters for GPU completion events that reset state has 2281 * been changed, and that they need to restart their wait after 2282 * checking for potential errors (and bail out to drop locks if there is 2283 * a gpu reset pending so that i915_error_work_func can acquire them). 2284 */ 2285 2286 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2287 for_each_ring(ring, dev_priv, i) 2288 wake_up_all(&ring->irq_queue); 2289 2290 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2291 wake_up_all(&dev_priv->pending_flip_queue); 2292 2293 /* 2294 * Signal tasks blocked in i915_gem_wait_for_error that the pending 2295 * reset state is cleared. 2296 */ 2297 if (reset_completed) 2298 wake_up_all(&dev_priv->gpu_error.reset_queue); 2299 } 2300 2301 /** 2302 * i915_error_work_func - do process context error handling work 2303 * @work: work struct 2304 * 2305 * Fire an error uevent so userspace can see that a hang or error 2306 * was detected.
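* Also performs the GPU reset if one is pending, and wakes all waiters once the reset completes (or the GPU is declared terminally wedged).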
2307 */ 2308 static void i915_error_work_func(struct work_struct *work) 2309 { 2310 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, 2311 work); 2312 struct drm_i915_private *dev_priv = 2313 container_of(error, struct drm_i915_private, gpu_error); 2314 struct drm_device *dev = dev_priv->dev; 2315 #if 0 2316 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2317 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2318 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2319 #endif 2320 int ret; 2321 2322 /* kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); */ 2323 2324 /* 2325 * Note that there's only one work item which does gpu resets, so we 2326 * need not worry about concurrent gpu resets potentially incrementing 2327 * error->reset_counter twice. We only need to take care of another 2328 * racing irq/hangcheck declaring the gpu dead for a second time. A 2329 * quick check for that is good enough: schedule_work ensures the 2330 * correct ordering between hang detection and this work item, and since 2331 * the reset in-progress bit is only ever set by code outside of this 2332 * work we don't need to worry about any other races. 2333 */ 2334 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { 2335 DRM_DEBUG_DRIVER("resetting chip\n"); 2336 #if 0 2337 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2338 reset_event); 2339 #endif 2340 2341 /* 2342 * In most cases it's guaranteed that we get here with an RPM 2343 * reference held, for example because there is a pending GPU 2344 * request that won't finish until the reset is done. This 2345 * isn't the case at least when we get here by doing a 2346 * simulated reset via debugfs, so get an RPM reference. 2347 */ 2348 intel_runtime_pm_get(dev_priv); 2349 /* 2350 * All state reset _must_ be completed before we update the 2351 * reset counter, for otherwise waiters might miss the reset 2352 * pending state and not properly drop locks, resulting in 2353 * deadlocks with the reset work. 2354 */ 2355 ret = i915_reset(dev); 2356 2357 intel_display_handle_reset(dev); 2358 2359 intel_runtime_pm_put(dev_priv); 2360 2361 if (ret == 0) { 2362 /* 2363 * After all the gem state is reset, increment the reset 2364 * counter and wake up everyone waiting for the reset to 2365 * complete. 2366 * 2367 * Since unlock operations are a one-sided barrier only, 2368 * we need to insert a barrier here to order any seqno 2369 * updates before 2370 * the counter increment. 2371 */ 2372 smp_mb__before_atomic(); 2373 atomic_inc(&dev_priv->gpu_error.reset_counter); 2374 2375 #if 0 2376 kobject_uevent_env(&dev->primary->kdev->kobj, 2377 KOBJ_CHANGE, reset_done_event); 2378 #endif 2379 } else { 2380 atomic_set_mask(I915_WEDGED, &error->reset_counter); 2381 } 2382 2383 /* 2384 * Note: The wake_up also serves as a memory barrier so that 2385 * waiters see the updated value of the reset counter atomic_t.
2386 */ 2387 i915_error_wake_up(dev_priv, true); 2388 } 2389 } 2390 2391 static void i915_report_and_clear_eir(struct drm_device *dev) 2392 { 2393 struct drm_i915_private *dev_priv = dev->dev_private; 2394 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2395 u32 eir = I915_READ(EIR); 2396 int pipe, i; 2397 2398 if (!eir) 2399 return; 2400 2401 pr_err("render error detected, EIR: 0x%08x\n", eir); 2402 2403 #if 0 2404 i915_get_extra_instdone(dev, instdone); 2405 #endif 2406 2407 if (IS_G4X(dev)) { 2408 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2409 u32 ipeir = I915_READ(IPEIR_I965); 2410 2411 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2412 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2413 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2414 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2415 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2416 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2417 I915_WRITE(IPEIR_I965, ipeir); 2418 POSTING_READ(IPEIR_I965); 2419 } 2420 if (eir & GM45_ERROR_PAGE_TABLE) { 2421 u32 pgtbl_err = I915_READ(PGTBL_ER); 2422 pr_err("page table error\n"); 2423 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2424 I915_WRITE(PGTBL_ER, pgtbl_err); 2425 POSTING_READ(PGTBL_ER); 2426 } 2427 } 2428 2429 if (!IS_GEN2(dev)) { 2430 if (eir & I915_ERROR_PAGE_TABLE) { 2431 u32 pgtbl_err = I915_READ(PGTBL_ER); 2432 pr_err("page table error\n"); 2433 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); 2434 I915_WRITE(PGTBL_ER, pgtbl_err); 2435 POSTING_READ(PGTBL_ER); 2436 } 2437 } 2438 2439 if (eir & I915_ERROR_MEMORY_REFRESH) { 2440 pr_err("memory refresh error:\n"); 2441 for_each_pipe(pipe) 2442 pr_err("pipe %c stat: 0x%08x\n", 2443 pipe_name(pipe), I915_READ(PIPESTAT(pipe))); 2444 /* pipestat has already been acked */ 2445 } 2446 if (eir & I915_ERROR_INSTRUCTION) { 2447 pr_err("instruction error\n"); 2448 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2449 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2450 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2451 if (INTEL_INFO(dev)->gen < 4) { 2452 u32 ipeir = I915_READ(IPEIR); 2453 2454 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2455 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); 2456 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); 2457 I915_WRITE(IPEIR, ipeir); 2458 POSTING_READ(IPEIR); 2459 } else { 2460 u32 ipeir = I915_READ(IPEIR_I965); 2461 2462 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); 2463 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); 2464 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); 2465 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); 2466 I915_WRITE(IPEIR_I965, ipeir); 2467 POSTING_READ(IPEIR_I965); 2468 } 2469 } 2470 2471 I915_WRITE(EIR, eir); 2472 POSTING_READ(EIR); 2473 eir = I915_READ(EIR); 2474 if (eir) { 2475 /* 2476 * some errors might have become stuck, 2477 * mask them. 2478 */ 2479 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); 2480 I915_WRITE(EMR, I915_READ(EMR) | eir); 2481 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 2482 } 2483 } 2484 2485 /** 2486 * i915_handle_error - handle an error interrupt 2487 * @dev: drm device 2488 * 2489 * Do some basic checking of register state at error interrupt time and 2490 * dump it to the syslog. Also call i915_capture_error_state() to make 2491 * sure we get a record and make it available in debugfs. Fire a uevent 2492 * so userspace knows something bad happened (should trigger collection 2493 * of a ring dump etc.). 2494 */ 2495 void i915_handle_error(struct drm_device *dev, bool wedged, 2496 const char *fmt, ...)
2497 { 2498 struct drm_i915_private *dev_priv = dev->dev_private; 2499 #if 0 2500 va_list args; 2501 char error_msg[80]; 2502 2503 va_start(args, fmt); 2504 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2505 va_end(args); 2506 2507 i915_capture_error_state(dev, wedged, error_msg); 2508 #endif 2509 i915_report_and_clear_eir(dev); 2510 2511 if (wedged) { 2512 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, 2513 &dev_priv->gpu_error.reset_counter); 2514 2515 /* 2516 * Wakeup waiting processes so that the reset work function 2517 * i915_error_work_func doesn't deadlock trying to grab various 2518 * locks. By bumping the reset counter first, the woken 2519 * processes will see a reset in progress and back off, 2520 * releasing their locks, and then wait for the reset to complete. 2521 * We must do this for _all_ gpu waiters that might hold locks 2522 * that the reset work needs to acquire. 2523 * 2524 * Note: The wake_up serves as the required memory barrier to 2525 * ensure that the waiters see the updated value of the reset 2526 * counter atomic_t. 2527 */ 2528 i915_error_wake_up(dev_priv, false); 2529 } 2530 2531 /* 2532 * Our reset work can grab modeset locks (since it needs to reset the 2533 * state of outstanding pageflips). Hence it must not be run on our own 2534 * dev_priv->wq work queue for otherwise the flush_work in the pageflip 2535 * code will deadlock. 2536 */ 2537 schedule_work(&dev_priv->gpu_error.work); 2538 } 2539 2540 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) 2541 { 2542 struct drm_i915_private *dev_priv = dev->dev_private; 2543 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 2544 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2545 struct drm_i915_gem_object *obj; 2546 struct intel_unpin_work *work; 2547 bool stall_detected; 2548 2549 /* Ignore early vblank irqs */ 2550 if (intel_crtc == NULL) 2551 return; 2552 2553 lockmgr(&dev->event_lock, LK_EXCLUSIVE); 2554 work = intel_crtc->unpin_work; 2555 2556 if (work == NULL || 2557 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || 2558 !work->enable_stall_check) { 2559 /* Either the pending flip IRQ arrived, or we're too early.
Don't check */ 2560 lockmgr(&dev->event_lock, LK_RELEASE); 2561 return; 2562 } 2563 2564 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 2565 obj = work->pending_flip_obj; 2566 if (INTEL_INFO(dev)->gen >= 4) { 2567 int dspsurf = DSPSURF(intel_crtc->plane); 2568 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == 2569 i915_gem_obj_ggtt_offset(obj); 2570 } else { 2571 int dspaddr = DSPADDR(intel_crtc->plane); 2572 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + 2573 crtc->y * crtc->primary->fb->pitches[0] + 2574 crtc->x * crtc->primary->fb->bits_per_pixel/8); 2575 } 2576 2577 lockmgr(&dev->event_lock, LK_RELEASE); 2578 2579 if (stall_detected) { 2580 DRM_DEBUG_DRIVER("Pageflip stall detected\n"); 2581 intel_prepare_page_flip(dev, intel_crtc->plane); 2582 } 2583 } 2584 2585 /* Called from drm generic code, passed 'crtc' which 2586 * we use as a pipe index 2587 */ 2588 static int i915_enable_vblank(struct drm_device *dev, int pipe) 2589 { 2590 struct drm_i915_private *dev_priv = dev->dev_private; 2591 2592 if (!i915_pipe_enabled(dev, pipe)) 2593 return -EINVAL; 2594 2595 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2596 if (INTEL_INFO(dev)->gen >= 4) 2597 i915_enable_pipestat(dev_priv, pipe, 2598 PIPE_START_VBLANK_INTERRUPT_STATUS); 2599 else 2600 i915_enable_pipestat(dev_priv, pipe, 2601 PIPE_VBLANK_INTERRUPT_STATUS); 2602 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2603 2604 return 0; 2605 } 2606 2607 static int ironlake_enable_vblank(struct drm_device *dev, int pipe) 2608 { 2609 struct drm_i915_private *dev_priv = dev->dev_private; 2610 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2611 DE_PIPE_VBLANK(pipe); 2612 2613 if (!i915_pipe_enabled(dev, pipe)) 2614 return -EINVAL; 2615 2616 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2617 ironlake_enable_display_irq(dev_priv, bit); 2618 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2619 2620 return 0; 2621 } 2622 2623 static int valleyview_enable_vblank(struct drm_device *dev, int pipe) 2624 { 2625 struct drm_i915_private *dev_priv = dev->dev_private; 2626 2627 if (!i915_pipe_enabled(dev, pipe)) 2628 return -EINVAL; 2629 2630 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2631 i915_enable_pipestat(dev_priv, pipe, 2632 PIPE_START_VBLANK_INTERRUPT_STATUS); 2633 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2634 2635 return 0; 2636 } 2637 2638 static int gen8_enable_vblank(struct drm_device *dev, int pipe) 2639 { 2640 struct drm_i915_private *dev_priv = dev->dev_private; 2641 2642 if (!i915_pipe_enabled(dev, pipe)) 2643 return -EINVAL; 2644 2645 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2646 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; 2647 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2648 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2649 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2650 return 0; 2651 } 2652 2653 /* Called from drm generic code, passed 'crtc' which 2654 * we use as a pipe index 2655 */ 2656 static void i915_disable_vblank(struct drm_device *dev, int pipe) 2657 { 2658 struct drm_i915_private *dev_priv = dev->dev_private; 2659 2660 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2661 i915_disable_pipestat(dev_priv, pipe, 2662 PIPE_VBLANK_INTERRUPT_STATUS | 2663 PIPE_START_VBLANK_INTERRUPT_STATUS); 2664 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2665 } 2666 2667 static void ironlake_disable_vblank(struct drm_device *dev, int pipe) 2668 { 2669 struct drm_i915_private *dev_priv = dev->dev_private; 2670 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? 
DE_PIPE_VBLANK_IVB(pipe) : 2671 DE_PIPE_VBLANK(pipe); 2672 2673 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2674 ironlake_disable_display_irq(dev_priv, bit); 2675 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2676 } 2677 2678 static void valleyview_disable_vblank(struct drm_device *dev, int pipe) 2679 { 2680 struct drm_i915_private *dev_priv = dev->dev_private; 2681 2682 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2683 i915_disable_pipestat(dev_priv, pipe, 2684 PIPE_START_VBLANK_INTERRUPT_STATUS); 2685 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2686 } 2687 2688 static void gen8_disable_vblank(struct drm_device *dev, int pipe) 2689 { 2690 struct drm_i915_private *dev_priv = dev->dev_private; 2691 2692 if (!i915_pipe_enabled(dev, pipe)) 2693 return; 2694 2695 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 2696 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; 2697 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); 2698 POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); 2699 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 2700 } 2701 2702 static u32 2703 ring_last_seqno(struct intel_engine_cs *ring) 2704 { 2705 return list_entry(ring->request_list.prev, 2706 struct drm_i915_gem_request, list)->seqno; 2707 } 2708 2709 static bool 2710 ring_idle(struct intel_engine_cs *ring, u32 seqno) 2711 { 2712 return (list_empty(&ring->request_list) || 2713 i915_seqno_passed(seqno, ring_last_seqno(ring))); 2714 } 2715 2716 static bool 2717 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2718 { 2719 if (INTEL_INFO(dev)->gen >= 8) { 2720 /* 2721 * FIXME: gen8 semaphore support - currently we don't emit 2722 * semaphores on bdw anyway, but this needs to be addressed when 2723 * we merge that code. 2724 */ 2725 return false; 2726 } else { 2727 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2728 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | 2729 MI_SEMAPHORE_REGISTER); 2730 } 2731 } 2732 2733 static struct intel_engine_cs * 2734 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr) 2735 { 2736 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2737 struct intel_engine_cs *signaller; 2738 int i; 2739 2740 if (INTEL_INFO(dev_priv->dev)->gen >= 8) { 2741 /* 2742 * FIXME: gen8 semaphore support - currently we don't emit 2743 * semaphores on bdw anyway, but this needs to be addressed when 2744 * we merge that code. 2745 */ 2746 return NULL; 2747 } else { 2748 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; 2749 2750 for_each_ring(signaller, dev_priv, i) { 2751 if (ring == signaller) 2752 continue; 2753 2754 if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) 2755 return signaller; 2756 } 2757 } 2758 2759 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x\n", 2760 ring->id, ipehr); 2761 2762 return NULL; 2763 } 2764 2765 static struct intel_engine_cs * 2766 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) 2767 { 2768 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2769 u32 cmd, ipehr, head; 2770 int i; 2771 2772 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2773 if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) 2774 return NULL; 2775 2776 /* 2777 * HEAD is likely pointing to the dword after the actual command, 2778 * so scan backwards until we find the MBOX. But limit it to just 3 2779 * dwords. Note that we don't care about ACTHD here since that might 2780 * point at a batch, and semaphores are always emitted into the 2781 * ringbuffer itself.
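* The scan below starts at HEAD, masks against the ring size to handle wrap-around, and walks back one dword at a time.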
2782 */ 2783 head = I915_READ_HEAD(ring) & HEAD_ADDR; 2784 2785 for (i = 4; i; --i) { 2786 /* 2787 * Be paranoid and presume the hw has gone off into the wild - 2788 * our ring is smaller than what the hardware (and hence 2789 * HEAD_ADDR) allows. Also handles wrap-around. 2790 */ 2791 head &= ring->buffer->size - 1; 2792 2793 /* This here seems to blow up */ 2794 cmd = ioread32(ring->buffer->virtual_start + head); 2795 if (cmd == ipehr) 2796 break; 2797 2798 head -= 4; 2799 } 2800 2801 if (!i) 2802 return NULL; 2803 2804 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; 2805 return semaphore_wait_to_signaller_ring(ring, ipehr); 2806 } 2807 2808 static int semaphore_passed(struct intel_engine_cs *ring) 2809 { 2810 struct drm_i915_private *dev_priv = ring->dev->dev_private; 2811 struct intel_engine_cs *signaller; 2812 u32 seqno; 2813 2814 ring->hangcheck.deadlock++; 2815 2816 signaller = semaphore_waits_for(ring, &seqno); 2817 if (signaller == NULL) 2818 return -1; 2819 2820 /* Prevent pathological recursion due to driver bugs */ 2821 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) 2822 return -1; 2823 2824 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) 2825 return 1; 2826 2827 /* cursory check for an unkickable deadlock */ 2828 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && 2829 semaphore_passed(signaller) < 0) 2830 return -1; 2831 2832 return 0; 2833 } 2834 2835 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) 2836 { 2837 struct intel_engine_cs *ring; 2838 int i; 2839 2840 for_each_ring(ring, dev_priv, i) 2841 ring->hangcheck.deadlock = 0; 2842 } 2843 2844 static enum intel_ring_hangcheck_action 2845 ring_stuck(struct intel_engine_cs *ring, u64 acthd) 2846 { 2847 struct drm_device *dev = ring->dev; 2848 struct drm_i915_private *dev_priv = dev->dev_private; 2849 u32 tmp; 2850 2851 if (ring->hangcheck.acthd != acthd) 2852 return HANGCHECK_ACTIVE; 2853 2854 if (IS_GEN2(dev)) 2855 return HANGCHECK_HUNG; 2856 2857 /* Is the chip hanging on a WAIT_FOR_EVENT? 2858 * If so we can simply poke the RB_WAIT bit 2859 * and break the hang. This should work on 2860 * all but the second generation chipsets. 2861 */ 2862 tmp = I915_READ_CTL(ring); 2863 if (tmp & RING_WAIT) { 2864 i915_handle_error(dev, false, 2865 "Kicking stuck wait on %s", 2866 ring->name); 2867 I915_WRITE_CTL(ring, tmp); 2868 return HANGCHECK_KICK; 2869 } 2870 2871 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 2872 switch (semaphore_passed(ring)) { 2873 default: 2874 return HANGCHECK_HUNG; 2875 case 1: 2876 i915_handle_error(dev, false, 2877 "Kicking stuck semaphore on %s", 2878 ring->name); 2879 I915_WRITE_CTL(ring, tmp); 2880 return HANGCHECK_KICK; 2881 case 0: 2882 return HANGCHECK_WAIT; 2883 } 2884 } 2885 2886 return HANGCHECK_HUNG; 2887 } 2888 2889 /** 2890 * This is called when the chip hasn't reported back with completed 2891 * batchbuffers in a long time. We keep track of per-ring seqno progress and, 2892 * if there is no progress, the hangcheck score for that ring is increased. 2893 * Further, acthd is inspected to see if the ring is stuck. If it is, 2894 * we kick the ring. If we see no progress on three subsequent calls 2895 * we assume the chip is wedged and try to fix it by resetting it.
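* The scoring below uses the BUSY/KICK/HUNG increments defined at the top of the function.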
2896 */ 2897 static void i915_hangcheck_elapsed(unsigned long data) 2898 { 2899 struct drm_device *dev = (struct drm_device *)data; 2900 struct drm_i915_private *dev_priv = dev->dev_private; 2901 struct intel_engine_cs *ring; 2902 int i; 2903 int busy_count = 0, rings_hung = 0; 2904 bool stuck[I915_NUM_RINGS] = { 0 }; 2905 #define BUSY 1 2906 #define KICK 5 2907 #define HUNG 20 2908 2909 if (!i915.enable_hangcheck) 2910 return; 2911 2912 for_each_ring(ring, dev_priv, i) { 2913 u64 acthd; 2914 u32 seqno; 2915 bool busy = true; 2916 2917 semaphore_clear_deadlocks(dev_priv); 2918 2919 seqno = ring->get_seqno(ring, false); 2920 acthd = intel_ring_get_active_head(ring); 2921 2922 if (ring->hangcheck.seqno == seqno) { 2923 if (ring_idle(ring, seqno)) { 2924 ring->hangcheck.action = HANGCHECK_IDLE; 2925 2926 if (waitqueue_active(&ring->irq_queue)) { 2927 /* Issue a wake-up to catch stuck h/w. */ 2928 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { 2929 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) 2930 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 2931 ring->name); 2932 else 2933 DRM_INFO("Fake missed irq on %s\n", 2934 ring->name); 2935 wake_up_all(&ring->irq_queue); 2936 } 2937 /* Safeguard against driver failure */ 2938 ring->hangcheck.score += BUSY; 2939 } else 2940 busy = false; 2941 } else { 2942 /* We always increment the hangcheck score 2943 * if the ring is busy and still processing 2944 * the same request, so that no single request 2945 * can run indefinitely (such as a chain of 2946 * batches). The only time we do not increment 2947 * the hangcheck score on this ring is if this 2948 * ring is in a legitimate wait for another 2949 * ring. In that case the waiting ring is a 2950 * victim and we want to be sure we catch the 2951 * right culprit. Then every time we do kick 2952 * the ring, add a small increment to the 2953 * score so that we can catch a batch that is 2954 * being repeatedly kicked and so responsible 2955 * for stalling the machine. 2956 */ 2957 ring->hangcheck.action = ring_stuck(ring, 2958 acthd); 2959 2960 switch (ring->hangcheck.action) { 2961 case HANGCHECK_IDLE: 2962 case HANGCHECK_WAIT: 2963 break; 2964 case HANGCHECK_ACTIVE: 2965 ring->hangcheck.score += BUSY; 2966 break; 2967 case HANGCHECK_KICK: 2968 ring->hangcheck.score += KICK; 2969 break; 2970 case HANGCHECK_HUNG: 2971 ring->hangcheck.score += HUNG; 2972 stuck[i] = true; 2973 break; 2974 } 2975 } 2976 } else { 2977 ring->hangcheck.action = HANGCHECK_ACTIVE; 2978 2979 /* Gradually reduce the count so that we catch DoS 2980 * attempts across multiple batches. 2981 */ 2982 if (ring->hangcheck.score > 0) 2983 ring->hangcheck.score--; 2984 } 2985 2986 ring->hangcheck.seqno = seqno; 2987 ring->hangcheck.acthd = acthd; 2988 busy_count += busy; 2989 } 2990 2991 for_each_ring(ring, dev_priv, i) { 2992 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 2993 DRM_INFO("%s on %s\n", 2994 stuck[i] ?
"stuck" : "no progress", 2995 ring->name); 2996 rings_hung++; 2997 } 2998 } 2999 3000 if (rings_hung) 3001 return i915_handle_error(dev, true, "Ring hung"); 3002 3003 if (busy_count) 3004 /* Reset timer case chip hangs without another request 3005 * being added */ 3006 i915_queue_hangcheck(dev); 3007 } 3008 3009 void i915_queue_hangcheck(struct drm_device *dev) 3010 { 3011 struct drm_i915_private *dev_priv = dev->dev_private; 3012 if (!i915.enable_hangcheck) 3013 return; 3014 3015 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3016 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3017 } 3018 3019 static void ibx_irq_reset(struct drm_device *dev) 3020 { 3021 struct drm_i915_private *dev_priv = dev->dev_private; 3022 3023 if (HAS_PCH_NOP(dev)) 3024 return; 3025 3026 GEN5_IRQ_RESET(SDE); 3027 3028 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) 3029 I915_WRITE(SERR_INT, 0xffffffff); 3030 } 3031 3032 /* 3033 * SDEIER is also touched by the interrupt handler to work around missed PCH 3034 * interrupts. Hence we can't update it after the interrupt handler is enabled - 3035 * instead we unconditionally enable all PCH interrupt sources here, but then 3036 * only unmask them as needed with SDEIMR. 3037 * 3038 * This function needs to be called before interrupts are enabled. 3039 */ 3040 static void ibx_irq_pre_postinstall(struct drm_device *dev) 3041 { 3042 struct drm_i915_private *dev_priv = dev->dev_private; 3043 3044 if (HAS_PCH_NOP(dev)) 3045 return; 3046 3047 WARN_ON(I915_READ(SDEIER) != 0); 3048 I915_WRITE(SDEIER, 0xffffffff); 3049 POSTING_READ(SDEIER); 3050 } 3051 3052 static void gen5_gt_irq_reset(struct drm_device *dev) 3053 { 3054 struct drm_i915_private *dev_priv = dev->dev_private; 3055 3056 GEN5_IRQ_RESET(GT); 3057 if (INTEL_INFO(dev)->gen >= 6) 3058 GEN5_IRQ_RESET(GEN6_PM); 3059 } 3060 3061 /* drm_dma.h hooks 3062 */ 3063 static void ironlake_irq_reset(struct drm_device *dev) 3064 { 3065 struct drm_i915_private *dev_priv = dev->dev_private; 3066 3067 I915_WRITE(HWSTAM, 0xffffffff); 3068 3069 GEN5_IRQ_RESET(DE); 3070 if (IS_GEN7(dev)) 3071 I915_WRITE(GEN7_ERR_INT, 0xffffffff); 3072 3073 gen5_gt_irq_reset(dev); 3074 3075 ibx_irq_reset(dev); 3076 } 3077 3078 static void valleyview_irq_preinstall(struct drm_device *dev) 3079 { 3080 struct drm_i915_private *dev_priv = dev->dev_private; 3081 int pipe; 3082 3083 /* VLV magic */ 3084 I915_WRITE(VLV_IMR, 0); 3085 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); 3086 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3087 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3088 3089 /* and GT */ 3090 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3091 I915_WRITE(GTIIR, I915_READ(GTIIR)); 3092 3093 gen5_gt_irq_reset(dev); 3094 3095 I915_WRITE(DPINVGTT, 0xff); 3096 3097 I915_WRITE(PORT_HOTPLUG_EN, 0); 3098 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3099 for_each_pipe(pipe) 3100 I915_WRITE(PIPESTAT(pipe), 0xffff); 3101 I915_WRITE(VLV_IIR, 0xffffffff); 3102 I915_WRITE(VLV_IMR, 0xffffffff); 3103 I915_WRITE(VLV_IER, 0x0); 3104 POSTING_READ(VLV_IER); 3105 } 3106 3107 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3108 { 3109 GEN8_IRQ_RESET_NDX(GT, 0); 3110 GEN8_IRQ_RESET_NDX(GT, 1); 3111 GEN8_IRQ_RESET_NDX(GT, 2); 3112 GEN8_IRQ_RESET_NDX(GT, 3); 3113 } 3114 3115 static void gen8_irq_reset(struct drm_device *dev) 3116 { 3117 struct drm_i915_private *dev_priv = dev->dev_private; 3118 int pipe; 3119 3120 I915_WRITE(GEN8_MASTER_IRQ, 0); 3121 POSTING_READ(GEN8_MASTER_IRQ); 3122 3123 gen8_gt_irq_reset(dev_priv); 3124 3125 for_each_pipe(pipe) 3126 
GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3127 3128 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3129 GEN5_IRQ_RESET(GEN8_DE_MISC_); 3130 GEN5_IRQ_RESET(GEN8_PCU_); 3131 3132 ibx_irq_reset(dev); 3133 } 3134 3135 static void cherryview_irq_preinstall(struct drm_device *dev) 3136 { 3137 struct drm_i915_private *dev_priv = dev->dev_private; 3138 int pipe; 3139 3140 I915_WRITE(GEN8_MASTER_IRQ, 0); 3141 POSTING_READ(GEN8_MASTER_IRQ); 3142 3143 gen8_gt_irq_reset(dev_priv); 3144 3145 GEN5_IRQ_RESET(GEN8_PCU_); 3146 3147 POSTING_READ(GEN8_PCU_IIR); 3148 3149 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3150 3151 I915_WRITE(PORT_HOTPLUG_EN, 0); 3152 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3153 3154 for_each_pipe(pipe) 3155 I915_WRITE(PIPESTAT(pipe), 0xffff); 3156 3157 I915_WRITE(VLV_IMR, 0xffffffff); 3158 I915_WRITE(VLV_IER, 0x0); 3159 I915_WRITE(VLV_IIR, 0xffffffff); 3160 POSTING_READ(VLV_IIR); 3161 } 3162 3163 static void ibx_hpd_irq_setup(struct drm_device *dev) 3164 { 3165 struct drm_i915_private *dev_priv = dev->dev_private; 3166 struct drm_mode_config *mode_config = &dev->mode_config; 3167 struct intel_encoder *intel_encoder; 3168 u32 hotplug_irqs, hotplug, enabled_irqs = 0; 3169 3170 if (HAS_PCH_IBX(dev)) { 3171 hotplug_irqs = SDE_HOTPLUG_MASK; 3172 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3173 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3174 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; 3175 } else { 3176 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3177 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) 3178 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) 3179 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; 3180 } 3181 3182 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3183 3184 /* 3185 * Enable digital hotplug on the PCH, and configure the DP short pulse 3186 * duration to 2ms (which is the minimum in the Display Port spec) 3187 * 3188 * This register is the same on all known PCH chips. 3189 */ 3190 hotplug = I915_READ(PCH_PORT_HOTPLUG); 3191 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 3192 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 3193 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 3194 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 3195 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3196 } 3197 3198 static void ibx_irq_postinstall(struct drm_device *dev) 3199 { 3200 struct drm_i915_private *dev_priv = dev->dev_private; 3201 u32 mask; 3202 3203 if (HAS_PCH_NOP(dev)) 3204 return; 3205 3206 if (HAS_PCH_IBX(dev)) 3207 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; 3208 else 3209 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; 3210 3211 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); 3212 I915_WRITE(SDEIMR, ~mask); 3213 } 3214 3215 static void gen5_gt_irq_postinstall(struct drm_device *dev) 3216 { 3217 struct drm_i915_private *dev_priv = dev->dev_private; 3218 u32 pm_irqs, gt_irqs; 3219 3220 pm_irqs = gt_irqs = 0; 3221 3222 dev_priv->gt_irq_mask = ~0; 3223 if (HAS_L3_DPF(dev)) { 3224 /* L3 parity interrupt is always unmasked. 
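* It is therefore left out of gt_irq_mask and always enabled in GTIER below.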
*/ 3225 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); 3226 gt_irqs |= GT_PARITY_ERROR(dev); 3227 } 3228 3229 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3230 if (IS_GEN5(dev)) { 3231 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3232 ILK_BSD_USER_INTERRUPT; 3233 } else { 3234 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3235 } 3236 3237 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); 3238 3239 if (INTEL_INFO(dev)->gen >= 6) { 3240 pm_irqs |= dev_priv->pm_rps_events; 3241 3242 if (HAS_VEBOX(dev)) 3243 pm_irqs |= PM_VEBOX_USER_INTERRUPT; 3244 3245 dev_priv->pm_irq_mask = 0xffffffff; 3246 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); 3247 } 3248 } 3249 3250 static int ironlake_irq_postinstall(struct drm_device *dev) 3251 { 3252 struct drm_i915_private *dev_priv = dev->dev_private; 3253 u32 display_mask, extra_mask; 3254 3255 if (INTEL_INFO(dev)->gen >= 7) { 3256 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 3257 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | 3258 DE_PLANEB_FLIP_DONE_IVB | 3259 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); 3260 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | 3261 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); 3262 } else { 3263 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 3264 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | 3265 DE_AUX_CHANNEL_A | 3266 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | 3267 DE_POISON); 3268 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | 3269 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; 3270 } 3271 3272 dev_priv->irq_mask = ~display_mask; 3273 3274 I915_WRITE(HWSTAM, 0xeffe); 3275 3276 ibx_irq_pre_postinstall(dev); 3277 3278 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); 3279 3280 gen5_gt_irq_postinstall(dev); 3281 3282 ibx_irq_postinstall(dev); 3283 3284 if (IS_IRONLAKE_M(dev)) { 3285 /* Enable PCU event interrupts 3286 * 3287 * spinlocking not required here for correctness since interrupt 3288 * setup is guaranteed to run in single-threaded context. But we 3289 * need it to make the assert_spin_locked happy. 
*/ 3290 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3291 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3292 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3293 } 3294 3295 return 0; 3296 } 3297 3298 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) 3299 { 3300 u32 pipestat_mask; 3301 u32 iir_mask; 3302 3303 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3304 PIPE_FIFO_UNDERRUN_STATUS; 3305 3306 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3307 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3308 POSTING_READ(PIPESTAT(PIPE_A)); 3309 3310 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3311 PIPE_CRC_DONE_INTERRUPT_STATUS; 3312 3313 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3314 PIPE_GMBUS_INTERRUPT_STATUS); 3315 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3316 3317 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3318 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3319 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3320 dev_priv->irq_mask &= ~iir_mask; 3321 3322 I915_WRITE(VLV_IIR, iir_mask); 3323 I915_WRITE(VLV_IIR, iir_mask); 3324 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3325 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3326 POSTING_READ(VLV_IER); 3327 } 3328 3329 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3330 { 3331 u32 pipestat_mask; 3332 u32 iir_mask; 3333 3334 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3335 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3336 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3337 3338 dev_priv->irq_mask |= iir_mask; 3339 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3340 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3341 I915_WRITE(VLV_IIR, iir_mask); 3342 I915_WRITE(VLV_IIR, iir_mask); 3343 POSTING_READ(VLV_IIR); 3344 3345 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3346 PIPE_CRC_DONE_INTERRUPT_STATUS; 3347 3348 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3349 PIPE_GMBUS_INTERRUPT_STATUS); 3350 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3351 3352 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3353 PIPE_FIFO_UNDERRUN_STATUS; 3354 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3355 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3356 POSTING_READ(PIPESTAT(PIPE_A)); 3357 } 3358 3359 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) 3360 { 3361 assert_spin_locked(&dev_priv->irq_lock); 3362 3363 if (dev_priv->display_irqs_enabled) 3364 return; 3365 3366 dev_priv->display_irqs_enabled = true; 3367 3368 if (dev_priv->dev->irq_enabled) 3369 valleyview_display_irqs_install(dev_priv); 3370 } 3371 3372 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) 3373 { 3374 assert_spin_locked(&dev_priv->irq_lock); 3375 3376 if (!dev_priv->display_irqs_enabled) 3377 return; 3378 3379 dev_priv->display_irqs_enabled = false; 3380 3381 if (dev_priv->dev->irq_enabled) 3382 valleyview_display_irqs_uninstall(dev_priv); 3383 } 3384 3385 static int valleyview_irq_postinstall(struct drm_device *dev) 3386 { 3387 struct drm_i915_private *dev_priv = dev->dev_private; 3388 3389 dev_priv->irq_mask = ~0; 3390 3391 I915_WRITE(PORT_HOTPLUG_EN, 0); 3392 POSTING_READ(PORT_HOTPLUG_EN); 3393 3394 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3395 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3396 I915_WRITE(VLV_IIR, 0xffffffff); 3397 POSTING_READ(VLV_IER); 3398 3399 /* Interrupt setup is already guaranteed to be single-threaded, this is 3400 * just to make the assert_spin_locked check happy. 
*/ 3401 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3402 if (dev_priv->display_irqs_enabled) 3403 valleyview_display_irqs_install(dev_priv); 3404 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3405 3406 I915_WRITE(VLV_IIR, 0xffffffff); 3407 I915_WRITE(VLV_IIR, 0xffffffff); 3408 3409 gen5_gt_irq_postinstall(dev); 3410 3411 /* ack & enable invalid PTE error interrupts */ 3412 #if 0 /* FIXME: add support to irq handler for checking these bits */ 3413 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); 3414 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); 3415 #endif 3416 3417 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); 3418 3419 return 0; 3420 } 3421 3422 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) 3423 { 3424 int i; 3425 3426 /* These are interrupts we'll toggle with the ring mask register */ 3427 uint32_t gt_interrupts[] = { 3428 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | 3429 GT_RENDER_L3_PARITY_ERROR_INTERRUPT | 3430 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT, 3431 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | 3432 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, 3433 0, 3434 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT 3435 }; 3436 3437 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++) 3438 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]); 3439 3440 dev_priv->pm_irq_mask = 0xffffffff; 3441 } 3442 3443 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3444 { 3445 struct drm_device *dev = dev_priv->dev; 3446 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3447 GEN8_PIPE_CDCLK_CRC_DONE | 3448 GEN8_DE_PIPE_IRQ_FAULT_ERRORS; 3449 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | 3450 GEN8_PIPE_FIFO_UNDERRUN; 3451 int pipe; 3452 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3453 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3454 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3455 3456 for_each_pipe(pipe) 3457 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, dev_priv->de_irq_mask[pipe], 3458 de_pipe_enables); 3459 3460 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3461 } 3462 3463 static int gen8_irq_postinstall(struct drm_device *dev) 3464 { 3465 struct drm_i915_private *dev_priv = dev->dev_private; 3466 3467 ibx_irq_pre_postinstall(dev); 3468 3469 gen8_gt_irq_postinstall(dev_priv); 3470 gen8_de_irq_postinstall(dev_priv); 3471 3472 ibx_irq_postinstall(dev); 3473 3474 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); 3475 POSTING_READ(GEN8_MASTER_IRQ); 3476 3477 return 0; 3478 } 3479 3480 static int cherryview_irq_postinstall(struct drm_device *dev) 3481 { 3482 struct drm_i915_private *dev_priv = dev->dev_private; 3483 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT | 3484 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3485 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3486 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; 3487 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV | 3488 PIPE_CRC_DONE_INTERRUPT_STATUS; 3489 int pipe; 3490 3491 /* 3492 * Leave vblank interrupts masked initially. enable/disable will 3493 * toggle them based on usage. 
3494 */ 3495 dev_priv->irq_mask = ~enable_mask; 3496 3497 for_each_pipe(pipe) 3498 I915_WRITE(PIPESTAT(pipe), 0xffff); 3499 3500 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3501 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 3502 for_each_pipe(pipe) 3503 i915_enable_pipestat(dev_priv, pipe, pipestat_enable); 3504 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3505 3506 I915_WRITE(VLV_IIR, 0xffffffff); 3507 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3508 I915_WRITE(VLV_IER, enable_mask); 3509 3510 gen8_gt_irq_postinstall(dev_priv); 3511 3512 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); 3513 POSTING_READ(GEN8_MASTER_IRQ); 3514 3515 return 0; 3516 } 3517 3518 static void gen8_irq_uninstall(struct drm_device *dev) 3519 { 3520 struct drm_i915_private *dev_priv = dev->dev_private; 3521 3522 if (!dev_priv) 3523 return; 3524 3525 intel_hpd_irq_uninstall(dev_priv); 3526 3527 gen8_irq_reset(dev); 3528 } 3529 3530 static void valleyview_irq_uninstall(struct drm_device *dev) 3531 { 3532 struct drm_i915_private *dev_priv = dev->dev_private; 3533 int pipe; 3534 3535 if (!dev_priv) 3536 return; 3537 3538 I915_WRITE(VLV_MASTER_IER, 0); 3539 3540 intel_hpd_irq_uninstall(dev_priv); 3541 3542 for_each_pipe(pipe) 3543 I915_WRITE(PIPESTAT(pipe), 0xffff); 3544 3545 I915_WRITE(HWSTAM, 0xffffffff); 3546 I915_WRITE(PORT_HOTPLUG_EN, 0); 3547 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3548 3549 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3550 if (dev_priv->display_irqs_enabled) 3551 valleyview_display_irqs_uninstall(dev_priv); 3552 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3553 3554 dev_priv->irq_mask = 0; 3555 3556 I915_WRITE(VLV_IIR, 0xffffffff); 3557 I915_WRITE(VLV_IMR, 0xffffffff); 3558 I915_WRITE(VLV_IER, 0x0); 3559 POSTING_READ(VLV_IER); 3560 } 3561 3562 static void cherryview_irq_uninstall(struct drm_device *dev) 3563 { 3564 struct drm_i915_private *dev_priv = dev->dev_private; 3565 int pipe; 3566 3567 if (!dev_priv) 3568 return; 3569 3570 I915_WRITE(GEN8_MASTER_IRQ, 0); 3571 POSTING_READ(GEN8_MASTER_IRQ); 3572 3573 #define GEN8_IRQ_FINI_NDX(type, which) \ 3574 do { \ 3575 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \ 3576 I915_WRITE(GEN8_##type##_IER(which), 0); \ 3577 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3578 POSTING_READ(GEN8_##type##_IIR(which)); \ 3579 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \ 3580 } while (0) 3581 3582 #define GEN8_IRQ_FINI(type) \ 3583 do { \ 3584 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \ 3585 I915_WRITE(GEN8_##type##_IER, 0); \ 3586 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3587 POSTING_READ(GEN8_##type##_IIR); \ 3588 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \ 3589 } while (0) 3590 3591 GEN8_IRQ_FINI_NDX(GT, 0); 3592 GEN8_IRQ_FINI_NDX(GT, 1); 3593 GEN8_IRQ_FINI_NDX(GT, 2); 3594 GEN8_IRQ_FINI_NDX(GT, 3); 3595 3596 GEN8_IRQ_FINI(PCU); 3597 3598 #undef GEN8_IRQ_FINI 3599 #undef GEN8_IRQ_FINI_NDX 3600 3601 I915_WRITE(PORT_HOTPLUG_EN, 0); 3602 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 3603 3604 for_each_pipe(pipe) 3605 I915_WRITE(PIPESTAT(pipe), 0xffff); 3606 3607 I915_WRITE(VLV_IMR, 0xffffffff); 3608 I915_WRITE(VLV_IER, 0x0); 3609 I915_WRITE(VLV_IIR, 0xffffffff); 3610 POSTING_READ(VLV_IIR); 3611 } 3612 3613 static void ironlake_irq_uninstall(struct drm_device *dev) 3614 { 3615 struct drm_i915_private *dev_priv = dev->dev_private; 3616 3617 if (!dev_priv) 3618 return; 3619 3620 intel_hpd_irq_uninstall(dev_priv); 3621 3622 ironlake_irq_reset(dev); 3623 } 3624 3625 static void 
i8xx_irq_preinstall(struct drm_device * dev) 3626 { 3627 struct drm_i915_private *dev_priv = dev->dev_private; 3628 int pipe; 3629 3630 for_each_pipe(pipe) 3631 I915_WRITE(PIPESTAT(pipe), 0); 3632 I915_WRITE16(IMR, 0xffff); 3633 I915_WRITE16(IER, 0x0); 3634 POSTING_READ16(IER); 3635 } 3636 3637 static int i8xx_irq_postinstall(struct drm_device *dev) 3638 { 3639 struct drm_i915_private *dev_priv = dev->dev_private; 3640 3641 I915_WRITE16(EMR, 3642 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3643 3644 /* Unmask the interrupts that we always want on. */ 3645 dev_priv->irq_mask = 3646 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3647 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3648 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3649 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | 3650 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); 3651 I915_WRITE16(IMR, dev_priv->irq_mask); 3652 3653 I915_WRITE16(IER, 3654 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3655 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | 3656 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | 3657 I915_USER_INTERRUPT); 3658 POSTING_READ16(IER); 3659 3660 /* Interrupt setup is already guaranteed to be single-threaded, this is 3661 * just to make the assert_spin_locked check happy. */ 3662 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 3663 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3664 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3665 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 3666 3667 return 0; 3668 } 3669 3670 /* 3671 * Returns true when a page flip has completed. 3672 */ 3673 static bool i8xx_handle_vblank(struct drm_device *dev, 3674 int plane, int pipe, u32 iir) 3675 { 3676 struct drm_i915_private *dev_priv = dev->dev_private; 3677 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3678 3679 if (!intel_pipe_handle_vblank(dev, pipe)) 3680 return false; 3681 3682 if ((iir & flip_pending) == 0) 3683 return false; 3684 3685 intel_prepare_page_flip(dev, plane); 3686 3687 /* We detect FlipDone by looking for the change in PendingFlip from '1' 3688 * to '0' on the following vblank, i.e. IIR has the Pendingflip 3689 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence 3690 * the flip is completed (no longer pending). Since this doesn't raise 3691 * an interrupt per se, we watch for the change at vblank. 3692 */ 3693 if (I915_READ16(ISR) & flip_pending) 3694 return false; 3695 3696 intel_finish_page_flip(dev, pipe); 3697 3698 return true; 3699 } 3700 3701 static irqreturn_t i8xx_irq_handler(void *arg) 3702 { 3703 struct drm_device *dev = arg; 3704 struct drm_i915_private *dev_priv = dev->dev_private; 3705 u16 iir, new_iir; 3706 u32 pipe_stats[2]; 3707 int pipe; 3708 u16 flip_mask = 3709 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3710 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3711 3712 iir = I915_READ16(IIR); 3713 if (iir == 0) 3714 return; 3715 3716 while (iir & ~flip_mask) { 3717 /* Can't rely on pipestat interrupt bit in iir as it might 3718 * have been cleared after the pipestat interrupt was received. 3719 * It doesn't set the bit in iir again, but it still produces 3720 * interrupts (for non-MSI). 
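* Hence PIPESTAT is read and cleared under the irq lock below, before IIR itself is written back.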
static irqreturn_t i8xx_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		iir = new_iir;
	}
}

static void i8xx_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}

static void i915_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
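/*
 * Gen3 postinstall keeps two related masks: dev_priv->irq_mask is what
 * ends up in IMR (0 bits = unmasked), while enable_mask is what goes
 * into IER.  Note the plane flip-pending bits are unmasked in IMR but
 * deliberately left out of IER: they are consumed as latched status by
 * the vblank/flip logic rather than as interrupt sources in their own
 * right.
 */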
static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	return 0;
}

/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		return false;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		return false;

	intel_finish_page_flip(dev, pipe);

	return true;
}
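/*
 * Note how the handler below acks IIR with (iir & ~flip_mask): the
 * flip-pending bits are left latched on purpose, so a pending flip
 * survives across handler iterations until i915_handle_vblank() sees
 * ISR deassert and strips that plane's bit out of flip_mask.
 */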
static irqreturn_t i915_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);
}

static void i915_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	intel_hpd_irq_uninstall(dev_priv);

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
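/*
 * Gen4 postinstall differs from gen3 in a few ways: the display port
 * (hotplug) interrupt is always available so it is unmasked
 * unconditionally, G4X parts gain the BSD ring user interrupt, and
 * GMBUS events are routed through pipe A's PIPESTAT.  G4X also has its
 * own page-table/privilege error bits in EMR (the GM45_* masks below).
 */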
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
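/*
 * Hotplug detect enabling for the pre-PCH platforms.  Only pins whose
 * hpd_mark is HPD_ENABLED get their PORT_HOTPLUG_EN bit set, which is
 * how the HPD storm handling keeps a noisy port quiet: the pin is
 * marked HPD_DISABLED, dropped from the mask here, and the connector
 * falls back to polling until intel_hpd_irq_reenable() undoes it.
 */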
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later.  So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
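/*
 * Main gen4/g4x interrupt handler.  Unlike the gen2/gen3 handlers
 * above, which swap planes A/B for the vblank/flip bookkeeping when FBC
 * is present, gen4 uses an identity plane/pipe mapping (note the
 * i915_handle_vblank(dev, pipe, pipe, iir) call below).
 */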
static irqreturn_t i965_irq_handler(void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	int pipe;
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ(IIR);

	for (;;) {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
			    intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
				DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);
}

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	intel_hpd_irq_uninstall(dev_priv);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable(unsigned long data)
{
	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}
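/*
 * intel_irq_init() below wires up the per-platform vtable.  It also
 * registers intel_hpd_irq_reenable() (above) as a timer callback via
 * setup_timer(); presumably the storm-detection code elsewhere in this
 * file arms that timer when it shuts a pin down, so hotplug detection
 * eventually comes back without user intervention.
 */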
void intel_irq_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
		    (unsigned long) dev_priv);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_GEN8(dev)) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
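/*
 * intel_hpd_init() re-arms hotplug state after init or resume: every
 * pin is reset to HPD_ENABLED with a zeroed storm count, connectors
 * that have an HPD pin but no explicit polling mode fall back to
 * DRM_CONNECTOR_POLL_HPD, and the platform hook then programs the
 * hardware under the irq lock.
 */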
void intel_hpd_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/* Disable interrupts so we can allow runtime PM. */
void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev->driver->irq_uninstall(dev);
	dev_priv->pm.irqs_disabled = true;
}

/* Restore interrupts so we can recover from runtime PM. */
void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->pm.irqs_disabled = false;
	dev->driver->irq_preinstall(dev);
	dev->driver->irq_postinstall(dev);
}
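/*
 * The pm.irqs_disabled flag flipped by the two helpers above is what the
 * interrupt-update paths are expected to check before touching IMR and
 * friends, so a stray mask/unmask while runtime suspended trips a
 * warning instead of writing a powered-down register.
 */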