1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- 2 */ 3 /*- 4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 5 * All Rights Reserved. 6 * 7 * Permission is hereby granted, free of charge, to any person obtaining a 8 * copy of this software and associated documentation files (the 9 * "Software"), to deal in the Software without restriction, including 10 * without limitation the rights to use, copy, modify, merge, publish, 11 * distribute, sub license, and/or sell copies of the Software, and to 12 * permit persons to whom the Software is furnished to do so, subject to 13 * the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the 16 * next paragraph) shall be included in all copies or substantial portions 17 * of the Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 26 * 27 * $FreeBSD: src/sys/dev/drm2/i915/i915_irq.c,v 1.1 2012/05/22 11:07:44 kib Exp $ 28 */ 29 30 #include <sys/sfbuf.h> 31 32 #include <drm/drmP.h> 33 #include <drm/i915_drm.h> 34 #include "i915_drv.h" 35 #include "intel_drv.h" 36 37 static void i915_capture_error_state(struct drm_device *dev); 38 static u32 ring_last_seqno(struct intel_ring_buffer *ring); 39 40 /** 41 * Interrupts that are always left unmasked. 42 * 43 * Since pipe events are edge-triggered from the PIPESTAT register to IIR, 44 * we leave them always unmasked in IMR and then control enabling them through 45 * PIPESTAT alone. 
 */
#define I915_INTERRUPT_ENABLE_FIX			\
	(I915_ASLE_INTERRUPT |				\
	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)

/* Union of the old (gen2/3) and new (gen4+) vblank status bits. */
#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

/* Union of the old and new vblank enable bits. */
#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

/* For display hotplug interrupt */

/*
 * Unmask @mask in the display engine interrupt mask register (DEIMR),
 * enabling delivery of those interrupts.  Caller must hold
 * dev_priv->irq_lock.
 */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Skip the MMIO cycle entirely if no requested bit is masked. */
	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);	/* flush the posted write */
	}
}

/*
 * Mask @mask in DEIMR, blocking delivery of those interrupts.
 * Caller must hold dev_priv->irq_lock.
 */
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	/* Skip the MMIO cycle if every requested bit is already masked. */
	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);	/* flush the posted write */
	}
}

/*
 * Enable the PIPESTAT event(s) in @mask for @pipe.  The low 16 bits of
 * PIPESTAT are enable bits and the high 16 bits are (write-1-to-clear)
 * status bits, hence the (mask >> 16) below to ack any stale status.
 * Caller must hold dev_priv->irq_lock.
 */
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		POSTING_READ(reg);	/* flush the posted write */
	}
}

/*
 * Disable the PIPESTAT event(s) in @mask for @pipe.
 * Caller must hold dev_priv->irq_lock.
 */
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = PIPESTAT(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		POSTING_READ(reg);	/* flush the posted write */
	}
}

/**
 *
intel_enable_asle - enable ASLE interrupt for OpRegion
 */
void intel_enable_asle(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);

	if (HAS_PCH_SPLIT(dev))
		ironlake_enable_display_irq(dev_priv, DE_GSE);
	else {
		/* Pre-PCH platforms signal backlight events via PIPESTAT. */
		i915_enable_pipestat(dev_priv, 1,
				     PIPE_LEGACY_BLC_EVENT_ENABLE);
		if (INTEL_INFO(dev)->gen >= 4)
			i915_enable_pipestat(dev_priv, 0,
					     PIPE_LEGACY_BLC_EVENT_ENABLE);
	}

	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32
i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	/* Combine the 16-bit high part and 8-bit low part into a 24-bit count. */
	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

/*
 * gm45+ has a dedicated hardware frame counter register, so no
 * high/low stitching is needed.
 */
static u32
gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get vblank count for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/*
 * Query the current scanout position of @pipe into *@vpos / *@hpos.
 * Returns a DRM_SCANOUTPOS_* flag mask (0 means pipe disabled / invalid).
 */
static int
i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG("i915: trying to get scanoutpos for disabled "
		    "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(pipe));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

/*
 * DRM vblank-timestamp hook: validates @pipe, resolves its CRTC and
 * delegates the actual timestamp computation to the DRM core helper.
 */
static int
i915_get_vblank_timestamp(struct drm_device *dev, int pipe, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
#if 0
		DRM_DEBUG("crtc %d is disabled\n", pipe);
#endif
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
	    vblank_time, flags, crtc);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_encoder *encoder;

	lockmgr(&mode_config->mutex, LK_EXCLUSIVE);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	/* Let every encoder that cares re-probe its connection state. */
	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
		if (encoder->hot_plug)
			encoder->hot_plug(encoder);

	lockmgr(&mode_config->mutex, LK_RELEASE);

	/* Just fire off a uevent and let userspace tell us what to do */
#if 0
	drm_helper_hpd_irq_event(dev);
#endif
}

/*
 * Service an ironlake MEMINT frequency-change request from hardware:
 * step the DRPS delay up or down one notch toward the hardware's
 * busy-average hint, clamped to [max_delay, min_delay].  Note that a
 * *lower* delay value means a *higher* GPU frequency.
 */
static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	lockmgr(&mchdev_lock, LK_EXCLUSIVE);

	/* Ack all pending MEMINT status bits. */
	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->rps.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->rps.cur_delay != dev_priv->rps.max_delay)
			new_delay = dev_priv->rps.cur_delay - 1;
		if (new_delay < dev_priv->rps.max_delay)
			new_delay = dev_priv->rps.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->rps.cur_delay != dev_priv->rps.min_delay)
			new_delay = dev_priv->rps.cur_delay + 1;
		if (new_delay > dev_priv->rps.min_delay)
			new_delay = dev_priv->rps.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->rps.cur_delay = new_delay;

	lockmgr(&mchdev_lock, LK_RELEASE);

	return;
}

/*
 * Record the ring's latest completed seqno and wake anyone sleeping on
 * the ring; also rearm the hangcheck timer since the GPU made progress.
 */
static void notify_ring(struct drm_device *dev,
    struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 seqno;

	if (ring->obj == NULL)
		return;

	seqno = ring->get_seqno(ring);

	lockmgr(&ring->irq_lock, LK_EXCLUSIVE);
	ring->irq_seqno = seqno;
	wakeup(ring);
	lockmgr(&ring->irq_lock, LK_RELEASE);

	if (i915_enable_hangcheck) {
		dev_priv->hangcheck_count = 0;
		mod_timer(&dev_priv->hangcheck_timer,
		    round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

/*
 * Deferred (work-queue) half of gen6+ RPS interrupt handling: consume
 * the saved PM IIR bits and bump the GPU frequency one step in the
 * direction the hardware asked for.
 */
static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	/* Atomically claim the pending bits and unmask further PM irqs. */
	spin_lock(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	I915_WRITE(GEN6_PMIMR, 0);
	spin_unlock(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
		return;

	lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
		new_delay = dev_priv->rps.cur_delay + 1;
	else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (!(new_delay > dev_priv->rps.max_delay ||
	      new_delay < dev_priv->rps.min_delay)) {
		gen6_set_rps(dev_priv->dev, new_delay);
	}

	lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
}

/*
 * Dispatch gen6+ GT (render/video/blitter) interrupt bits to the
 * matching rings and report command-stream errors.
 */
static void snb_gt_irq_handler(struct drm_device *dev,
    struct drm_i915_private *dev_priv,
    u32 gt_iir)
{

	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

#if 0
	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
#endif
}

/*
 * Save @pm_iir for the RPS work item and mask further PM interrupts
 * until the work has run.
 */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
    u32 pm_iir)
{

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock(&dev_priv->rps.lock);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock(&dev_priv->rps.lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

/*
 * Decode and log south-display (IBX PCH) interrupt sources; hotplug
 * events are pushed to the hotplug work queue.
 */
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;

	if (pch_iir & SDE_HOTPLUG_MASK)
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);

	if (pch_iir & SDE_AUDIO_POWER_MASK)
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
				 SDE_AUDIO_POWER_SHIFT);

	if (pch_iir & SDE_GMBUS)
		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));
504 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) 505 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); 506 507 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) 508 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 509 510 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 511 DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); 512 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 513 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 514 } 515 516 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 517 { 518 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 519 int pipe; 520 521 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 522 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 523 524 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) 525 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", 526 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 527 SDE_AUDIO_POWER_SHIFT_CPT); 528 529 if (pch_iir & SDE_AUX_MASK_CPT) 530 DRM_DEBUG_DRIVER("AUX channel interrupt\n"); 531 532 if (pch_iir & SDE_GMBUS_CPT) 533 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); 534 535 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 536 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 537 538 if (pch_iir & SDE_AUDIO_CP_CHG_CPT) 539 DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); 540 541 if (pch_iir & SDE_FDI_MASK_CPT) 542 for_each_pipe(pipe) 543 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", 544 pipe_name(pipe), 545 I915_READ(FDI_RX_IIR(pipe))); 546 } 547 548 static void 549 ivybridge_irq_handler(void *arg) 550 { 551 struct drm_device *dev = (struct drm_device *) arg; 552 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 553 u32 de_iir, gt_iir, de_ier, pm_iir; 554 int i; 555 556 atomic_inc(&dev_priv->irq_received); 557 558 /* disable master interrupt before clearing iir */ 559 de_ier = I915_READ(DEIER); 560 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 561 562 gt_iir = I915_READ(GTIIR); 563 if (gt_iir) { 564 snb_gt_irq_handler(dev, dev_priv, 
gt_iir); 565 I915_WRITE(GTIIR, gt_iir); 566 } 567 568 de_iir = I915_READ(DEIER); 569 if (de_iir) { 570 #if 0 571 if (de_iir & DE_GSE_IVB) 572 intel_opregion_gse_intr(dev); 573 #endif 574 575 for (i = 0; i < 3; i++) { 576 if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) 577 drm_handle_vblank(dev, i); 578 if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { 579 intel_prepare_page_flip(dev, i); 580 intel_finish_page_flip_plane(dev, i); 581 } 582 } 583 584 /* check event from PCH */ 585 if (de_iir & DE_PCH_EVENT_IVB) { 586 u32 pch_iir = I915_READ(SDEIIR); 587 588 cpt_irq_handler(dev, pch_iir); 589 590 /* clear PCH hotplug event before clear CPU irq */ 591 I915_WRITE(SDEIIR, pch_iir); 592 } 593 594 I915_WRITE(DEIIR, de_iir); 595 } 596 597 pm_iir = I915_READ(GEN6_PMIIR); 598 if (pm_iir) { 599 if (pm_iir & GEN6_PM_DEFERRED_EVENTS) 600 gen6_queue_rps_work(dev_priv, pm_iir); 601 I915_WRITE(GEN6_PMIIR, pm_iir); 602 } 603 604 I915_WRITE(DEIER, de_ier); 605 POSTING_READ(DEIER); 606 } 607 608 static void ilk_gt_irq_handler(struct drm_device *dev, 609 struct drm_i915_private *dev_priv, 610 u32 gt_iir) 611 { 612 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) 613 notify_ring(dev, &dev_priv->ring[RCS]); 614 if (gt_iir & GT_BSD_USER_INTERRUPT) 615 notify_ring(dev, &dev_priv->ring[VCS]); 616 } 617 618 static void 619 ironlake_irq_handler(void *arg) 620 { 621 struct drm_device *dev = arg; 622 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 623 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; 624 625 atomic_inc(&dev_priv->irq_received); 626 627 /* disable master interrupt before clearing iir */ 628 de_ier = I915_READ(DEIER); 629 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 630 POSTING_READ(DEIER); 631 632 de_iir = I915_READ(DEIIR); 633 gt_iir = I915_READ(GTIIR); 634 pch_iir = I915_READ(SDEIIR); 635 pm_iir = I915_READ(GEN6_PMIIR); 636 637 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && 638 (!IS_GEN6(dev) || pm_iir == 0)) 639 goto done; 640 641 if 
 (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (gt_iir & GT_GEN6_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (de_iir & DE_GSE) {
#if 1
		KIB_NOTYET();
#else
		intel_opregion_gse_intr(dev);
#endif
	}

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	/* should clear PCH hotplug event before clear CPU irq */
	I915_WRITE(SDEIIR, pch_iir);
	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	/* Re-enable the master interrupt. */
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
}

/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    error_work);
	struct drm_device *dev = dev_priv->dev;

	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */

	if (atomic_read(&dev_priv->mm.wedged)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
		if (!i915_reset(dev, GRDOM_RENDER)) {
			atomic_set(&dev_priv->mm.wedged, 0);
			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
		}
		/* Release anyone blocked in i915_wait_request(). */
		lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
		dev_priv->error_completion++;
		wakeup(&dev_priv->error_completion);
		lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);
	}
}

/*
 * Dump and ack the Error Identity Register (EIR).  If any error bits
 * refuse to clear, mask them in EMR so they stop re-triggering.
 */
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 eir = I915_READ(EIR);
	int pipe;

	if (!eir)
		return;

	kprintf("i915: render error detected, EIR: 0x%08x\n", eir);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			kprintf("  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			kprintf("  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			kprintf("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			kprintf("  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			kprintf("  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			kprintf("  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			kprintf("page table error\n");
			kprintf("  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			kprintf("page table error\n");
			kprintf("  PGTBL_ER: 0x%08x\n",
			       pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		kprintf("memory refresh error:\n");
		for_each_pipe(pipe)
			kprintf("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		kprintf("instruction error\n");
		kprintf("  INSTPM: 0x%08x\n",
		       I915_READ(INSTPM));
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			kprintf("  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR));
			kprintf("  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR));
			kprintf("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE));
			kprintf("  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			kprintf("  IPEIR: 0x%08x\n",
			       I915_READ(IPEIR_I965));
			kprintf("  IPEHR: 0x%08x\n",
			       I915_READ(IPEHR_I965));
			kprintf("  INSTDONE: 0x%08x\n",
			       I915_READ(INSTDONE_I965));
			kprintf("  INSTPS: 0x%08x\n",
			       I915_READ(INSTPS));
			kprintf("  INSTDONE1: 0x%08x\n",
			       I915_READ(INSTDONE1));
			kprintf("  ACTHD: 0x%08x\n",
			       I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}

/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog.
Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs.  Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	i915_capture_error_state(dev);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		lockmgr(&dev_priv->error_completion_lock, LK_EXCLUSIVE);
		dev_priv->error_completion = 0;
		atomic_set(&dev_priv->mm.wedged, 1);
		/* unlock acts as rel barrier for store to wedged */
		lockmgr(&dev_priv->error_completion_lock, LK_RELEASE);

		/*
		 * Wakeup waiting processes so they don't hang
		 */
		lockmgr(&dev_priv->ring[RCS].irq_lock, LK_EXCLUSIVE);
		wakeup(&dev_priv->ring[RCS]);
		lockmgr(&dev_priv->ring[RCS].irq_lock, LK_RELEASE);
		if (HAS_BSD(dev)) {
			lockmgr(&dev_priv->ring[VCS].irq_lock, LK_EXCLUSIVE);
			wakeup(&dev_priv->ring[VCS]);
			lockmgr(&dev_priv->ring[VCS].irq_lock, LK_RELEASE);
		}
		if (HAS_BLT(dev)) {
			lockmgr(&dev_priv->ring[BCS].irq_lock, LK_EXCLUSIVE);
			wakeup(&dev_priv->ring[BCS]);
			lockmgr(&dev_priv->ring[BCS].irq_lock, LK_RELEASE);
		}
	}

	/* Reset and uevent happen from process context in error_work. */
	queue_work(dev_priv->wq, &dev_priv->error_work);
}

/*
 * Heuristic for a missed flip-pending interrupt: if the display base
 * register already points at the new framebuffer object while a flip
 * is still marked pending, assume the interrupt was lost and complete
 * the flip manually.
 */
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj;
	struct intel_unpin_work *work;
	bool stall_detected;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
	work = intel_crtc->unpin_work;

	if (work == NULL || atomic_read(&work->pending) ||
	    !work->enable_stall_check) {
		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
		lockmgr(&dev->event_lock, LK_RELEASE);
		return;
	}

	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
	obj = work->pending_flip_obj;
	if (INTEL_INFO(dev)->gen >= 4) {
		int dspsurf = DSPSURF(intel_crtc->plane);
		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
	} else {
		int dspaddr = DSPADDR(intel_crtc->plane);
		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
							crtc->y * crtc->fb->pitches[0] +
							crtc->x * crtc->fb->bits_per_pixel/8);
	}

	lockmgr(&dev->event_lock, LK_RELEASE);

	if (stall_detected) {
		DRM_DEBUG("Pageflip stall detected\n");
		intel_prepare_page_flip(dev, intel_crtc->plane);
	}
}

/*
 * Top-level interrupt handler for pre-Ironlake (gen2-4) chips: loop
 * draining IIR and the per-pipe PIPESTAT registers until quiescent.
 */
static void
i915_driver_irq_handler(void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *)dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv;
#endif
	u32 iir, new_iir;
	u32 pipe_stats[I915_MAX_PIPES];
	u32 vblank_status;
	int vblank = 0;
	int irq_received;
	int pipe;
	bool blc_event = false;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	/* Gen4+ uses the "start of vblank" status bit, older parts the
	 * legacy vblank bit. */
	if (INTEL_INFO(dev)->gen >= 4)
		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
	else
		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false);

		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG("pipe %c underrun\n",
						  pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = 1;
			}
		}
		lockmgr(&dev_priv->irq_lock, LK_RELEASE);

		if (!irq_received)
			break;

		/* Consume port.  Then clear IIR or we'll miss events */
		if ((I915_HAS_HOTPLUG(dev)) &&
		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

			DRM_DEBUG("i915: hotplug event received, stat 0x%08x\n",
				  hotplug_status);
			if (hotplug_status & dev_priv->hotplug_supported_mask)
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

#if 0
		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}
#else
		if (dev_priv->sarea_priv)
			dev_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
#endif

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 0);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 0);
		}

		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
			intel_prepare_page_flip(dev, 1);
			if (dev_priv->flip_pending_is_done)
				intel_finish_page_flip_plane(dev, 1);
		}

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & vblank_status &&
			    drm_handle_vblank(dev, pipe)) {
				vblank++;
				if (!dev_priv->flip_pending_is_done) {
					i915_pageflip_stall_check(dev, pipe);
					intel_finish_page_flip(dev, pipe);
				}
			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
		}


		if (blc_event || (iir & I915_ASLE_INTERRUPT)) {
#if 1
			KIB_NOTYET();
#else
			intel_opregion_asle_intr(dev);
#endif
		}

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}
}

/*
 * Emit a breadcrumb store + user interrupt on the render ring and
 * return the new breadcrumb value.  Caller holds the DRM lock.
 */
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif

	i915_kernel_lost_context(dev);

	DRM_DEBUG("i915: emit_irq\n");

	/* Counter wraps within the positive int range; 0 is reserved. */
	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
#if 0
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;
#else
	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
#endif

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->counter;
}

/*
 * Block until the render-ring breadcrumb reaches @irq_nr (or timeout).
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
#if 0
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
#endif
	int ret;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

#if 0
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#else
	/* Fast path: already past the requested breadcrumb. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (dev_priv->sarea_priv) {
			dev_priv->sarea_priv->last_dispatch =
			    READ_BREADCRUMB(dev_priv);
		}
		return 0;
	}

	if (dev_priv->sarea_priv)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
#endif

	ret =
0; 1139 lockmgr(&ring->irq_lock, LK_EXCLUSIVE); 1140 if (ring->irq_get(ring)) { 1141 DRM_UNLOCK(dev); 1142 while (ret == 0 && READ_BREADCRUMB(dev_priv) < irq_nr) { 1143 ret = -lksleep(ring, &ring->irq_lock, PCATCH, 1144 "915wtq", 3 * hz); 1145 } 1146 ring->irq_put(ring); 1147 lockmgr(&ring->irq_lock, LK_RELEASE); 1148 DRM_LOCK(dev); 1149 } else { 1150 lockmgr(&ring->irq_lock, LK_RELEASE); 1151 if (_intel_wait_for(dev, READ_BREADCRUMB(dev_priv) >= irq_nr, 1152 3000, 1, "915wir")) 1153 ret = -EBUSY; 1154 } 1155 1156 if (ret == -EBUSY) { 1157 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1158 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 1159 } 1160 1161 return ret; 1162 } 1163 1164 /* Needs the lock as it touches the ring. 1165 */ 1166 int i915_irq_emit(struct drm_device *dev, void *data, 1167 struct drm_file *file_priv) 1168 { 1169 drm_i915_private_t *dev_priv = dev->dev_private; 1170 drm_i915_irq_emit_t *emit = data; 1171 int result; 1172 1173 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) { 1174 DRM_ERROR("called with no initialization\n"); 1175 return -EINVAL; 1176 } 1177 1178 RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 1179 1180 DRM_LOCK(dev); 1181 result = i915_emit_irq(dev); 1182 DRM_UNLOCK(dev); 1183 1184 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 1185 DRM_ERROR("copy_to_user\n"); 1186 return -EFAULT; 1187 } 1188 1189 return 0; 1190 } 1191 1192 /* Doesn't need the hardware lock. 
1193 */ 1194 int i915_irq_wait(struct drm_device *dev, void *data, 1195 struct drm_file *file_priv) 1196 { 1197 drm_i915_private_t *dev_priv = dev->dev_private; 1198 drm_i915_irq_wait_t *irqwait = data; 1199 1200 if (!dev_priv) { 1201 DRM_ERROR("called with no initialization\n"); 1202 return -EINVAL; 1203 } 1204 1205 return i915_wait_irq(dev, irqwait->irq_seq); 1206 } 1207 1208 /* Called from drm generic code, passed 'crtc' which 1209 * we use as a pipe index 1210 */ 1211 static int 1212 i915_enable_vblank(struct drm_device *dev, int pipe) 1213 { 1214 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1215 1216 if (!i915_pipe_enabled(dev, pipe)) 1217 return -EINVAL; 1218 1219 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1220 if (INTEL_INFO(dev)->gen >= 4) 1221 i915_enable_pipestat(dev_priv, pipe, 1222 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1223 else 1224 i915_enable_pipestat(dev_priv, pipe, 1225 PIPE_VBLANK_INTERRUPT_ENABLE); 1226 1227 /* maintain vblank delivery even in deep C-states */ 1228 if (dev_priv->info->gen == 3) 1229 I915_WRITE(INSTPM, INSTPM_AGPBUSY_DIS << 16); 1230 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1231 1232 return 0; 1233 } 1234 1235 static int 1236 ironlake_enable_vblank(struct drm_device *dev, int pipe) 1237 { 1238 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1239 1240 if (!i915_pipe_enabled(dev, pipe)) 1241 return -EINVAL; 1242 1243 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1244 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1245 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1246 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1247 1248 return 0; 1249 } 1250 1251 static int 1252 ivybridge_enable_vblank(struct drm_device *dev, int pipe) 1253 { 1254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1255 1256 if (!i915_pipe_enabled(dev, pipe)) 1257 return -EINVAL; 1258 1259 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1260 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
1261 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1262 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1263 1264 return 0; 1265 } 1266 1267 1268 /* Called from drm generic code, passed 'crtc' which 1269 * we use as a pipe index 1270 */ 1271 static void 1272 i915_disable_vblank(struct drm_device *dev, int pipe) 1273 { 1274 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1275 1276 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1277 if (dev_priv->info->gen == 3) 1278 I915_WRITE(INSTPM, 1279 INSTPM_AGPBUSY_DIS << 16 | INSTPM_AGPBUSY_DIS); 1280 1281 i915_disable_pipestat(dev_priv, pipe, 1282 PIPE_VBLANK_INTERRUPT_ENABLE | 1283 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1284 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1285 } 1286 1287 static void 1288 ironlake_disable_vblank(struct drm_device *dev, int pipe) 1289 { 1290 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1291 1292 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1293 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1294 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); 1295 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1296 } 1297 1298 static void 1299 ivybridge_disable_vblank(struct drm_device *dev, int pipe) 1300 { 1301 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1302 1303 lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE); 1304 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 
1305 DE_PIPEA_VBLANK_IVB : DE_PIPEB_VBLANK_IVB); 1306 lockmgr(&dev_priv->irq_lock, LK_RELEASE); 1307 } 1308 1309 /* Set the vblank monitor pipe 1310 */ 1311 int i915_vblank_pipe_set(struct drm_device *dev, void *data, 1312 struct drm_file *file_priv) 1313 { 1314 drm_i915_private_t *dev_priv = dev->dev_private; 1315 1316 if (!dev_priv) { 1317 DRM_ERROR("called with no initialization\n"); 1318 return -EINVAL; 1319 } 1320 1321 return 0; 1322 } 1323 1324 int i915_vblank_pipe_get(struct drm_device *dev, void *data, 1325 struct drm_file *file_priv) 1326 { 1327 drm_i915_private_t *dev_priv = dev->dev_private; 1328 drm_i915_vblank_pipe_t *pipe = data; 1329 1330 if (!dev_priv) { 1331 DRM_ERROR("called with no initialization\n"); 1332 return -EINVAL; 1333 } 1334 1335 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1336 1337 return 0; 1338 } 1339 1340 /** 1341 * Schedule buffer swap at given vertical blank. 1342 */ 1343 int i915_vblank_swap(struct drm_device *dev, void *data, 1344 struct drm_file *file_priv) 1345 { 1346 /* The delayed swap mechanism was fundamentally racy, and has been 1347 * removed. The model was that the client requested a delayed flip/swap 1348 * from the kernel, then waited for vblank before continuing to perform 1349 * rendering. The problem was that the kernel might wake the client 1350 * up before it dispatched the vblank swap (since the lock has to be 1351 * held while touching the ringbuffer), in which case the client would 1352 * clear and start the next frame before the swap occurred, and 1353 * flicker would occur in addition to likely missing the vblank. 1354 * 1355 * In the absence of this ioctl, userland falls back to a correct path 1356 * of waiting for a vblank, then dispatching the swap on its own. 1357 * Context switching to userland and back is plenty fast enough for 1358 * meeting the requirements of vblank swapping. 
1359 */ 1360 return -EINVAL; 1361 } 1362 1363 static u32 1364 ring_last_seqno(struct intel_ring_buffer *ring) 1365 { 1366 1367 if (list_empty(&ring->request_list)) 1368 return (0); 1369 else 1370 return (list_entry(ring->request_list.prev, 1371 struct drm_i915_gem_request, list)->seqno); 1372 } 1373 1374 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) 1375 { 1376 if (list_empty(&ring->request_list) || 1377 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) { 1378 /* Issue a wake-up to catch stuck h/w. */ 1379 if (ring->waiting_seqno) { 1380 DRM_ERROR( 1381 "Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n", 1382 ring->name, 1383 ring->waiting_seqno, 1384 ring->get_seqno(ring)); 1385 wakeup(ring); 1386 *err = true; 1387 } 1388 return true; 1389 } 1390 return false; 1391 } 1392 1393 static bool kick_ring(struct intel_ring_buffer *ring) 1394 { 1395 struct drm_device *dev = ring->dev; 1396 struct drm_i915_private *dev_priv = dev->dev_private; 1397 u32 tmp = I915_READ_CTL(ring); 1398 if (tmp & RING_WAIT) { 1399 DRM_ERROR("Kicking stuck wait on %s\n", 1400 ring->name); 1401 I915_WRITE_CTL(ring, tmp); 1402 return true; 1403 } 1404 return false; 1405 } 1406 1407 /** 1408 * This is called when the chip hasn't reported back with completed 1409 * batchbuffers in a long time. The first time this is called we simply record 1410 * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses 1411 * again, we assume the chip is wedged and try to fix it. 1412 */ 1413 void i915_hangcheck_elapsed(unsigned long data) 1414 { 1415 struct drm_device *dev = (struct drm_device *)data; 1416 drm_i915_private_t *dev_priv = dev->dev_private; 1417 uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt; 1418 bool err = false; 1419 1420 if (!i915_enable_hangcheck) 1421 return; 1422 1423 /* If all work is done then ACTHD clearly hasn't advanced. 
*/ 1424 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) && 1425 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) && 1426 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) { 1427 dev_priv->hangcheck_count = 0; 1428 if (err) 1429 goto repeat; 1430 return; 1431 } 1432 1433 if (INTEL_INFO(dev)->gen < 4) { 1434 instdone = I915_READ(INSTDONE); 1435 instdone1 = 0; 1436 } else { 1437 instdone = I915_READ(INSTDONE_I965); 1438 instdone1 = I915_READ(INSTDONE1); 1439 } 1440 acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]); 1441 acthd_bsd = HAS_BSD(dev) ? 1442 intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0; 1443 acthd_blt = HAS_BLT(dev) ? 1444 intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0; 1445 1446 if (dev_priv->last_acthd == acthd && 1447 dev_priv->last_acthd_bsd == acthd_bsd && 1448 dev_priv->last_acthd_blt == acthd_blt && 1449 dev_priv->last_instdone == instdone && 1450 dev_priv->last_instdone1 == instdone1) { 1451 if (dev_priv->hangcheck_count++ > 1) { 1452 DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); 1453 i915_handle_error(dev, true); 1454 1455 if (!IS_GEN2(dev)) { 1456 /* Is the chip hanging on a WAIT_FOR_EVENT? 1457 * If so we can simply poke the RB_WAIT bit 1458 * and break the hang. This should work on 1459 * all but the second generation chipsets. 
1460 */ 1461 if (kick_ring(&dev_priv->ring[RCS])) 1462 goto repeat; 1463 1464 if (HAS_BSD(dev) && 1465 kick_ring(&dev_priv->ring[VCS])) 1466 goto repeat; 1467 1468 if (HAS_BLT(dev) && 1469 kick_ring(&dev_priv->ring[BCS])) 1470 goto repeat; 1471 } 1472 1473 return; 1474 } 1475 } else { 1476 dev_priv->hangcheck_count = 0; 1477 1478 dev_priv->last_acthd = acthd; 1479 dev_priv->last_acthd_bsd = acthd_bsd; 1480 dev_priv->last_acthd_blt = acthd_blt; 1481 dev_priv->last_instdone = instdone; 1482 dev_priv->last_instdone1 = instdone1; 1483 } 1484 1485 repeat: 1486 /* Reset timer case chip hangs without another request being added */ 1487 mod_timer(&dev_priv->hangcheck_timer, 1488 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 1489 } 1490 1491 /* drm_dma.h hooks 1492 */ 1493 static void 1494 ironlake_irq_preinstall(struct drm_device *dev) 1495 { 1496 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1497 1498 atomic_set(&dev_priv->irq_received, 0); 1499 1500 I915_WRITE(HWSTAM, 0xeffe); 1501 1502 /* XXX hotplug from PCH */ 1503 1504 I915_WRITE(DEIMR, 0xffffffff); 1505 I915_WRITE(DEIER, 0x0); 1506 POSTING_READ(DEIER); 1507 1508 /* and GT */ 1509 I915_WRITE(GTIMR, 0xffffffff); 1510 I915_WRITE(GTIER, 0x0); 1511 POSTING_READ(GTIER); 1512 1513 /* south display irq */ 1514 I915_WRITE(SDEIMR, 0xffffffff); 1515 I915_WRITE(SDEIER, 0x0); 1516 POSTING_READ(SDEIER); 1517 } 1518 1519 /* 1520 * Enable digital hotplug on the PCH, and configure the DP short pulse 1521 * duration to 2ms (which is the minimum in the Display Port spec) 1522 * 1523 * This register is the same on all known PCH chips. 
1524 */ 1525 1526 static void ironlake_enable_pch_hotplug(struct drm_device *dev) 1527 { 1528 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1529 u32 hotplug; 1530 1531 hotplug = I915_READ(PCH_PORT_HOTPLUG); 1532 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); 1533 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; 1534 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; 1535 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; 1536 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 1537 } 1538 1539 static int ironlake_irq_postinstall(struct drm_device *dev) 1540 { 1541 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1542 /* enable kind of interrupts always enabled */ 1543 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1544 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1545 u32 render_irqs; 1546 u32 hotplug_mask; 1547 1548 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1549 dev_priv->irq_mask = ~display_mask; 1550 1551 /* should always can generate irq */ 1552 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1553 I915_WRITE(DEIMR, dev_priv->irq_mask); 1554 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); 1555 POSTING_READ(DEIER); 1556 1557 dev_priv->gt_irq_mask = ~0; 1558 1559 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1560 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1561 1562 if (IS_GEN6(dev)) 1563 render_irqs = 1564 GT_USER_INTERRUPT | 1565 GT_GEN6_BSD_USER_INTERRUPT | 1566 GT_GEN6_BLT_USER_INTERRUPT; 1567 else 1568 render_irqs = 1569 GT_USER_INTERRUPT | 1570 GT_PIPE_NOTIFY | 1571 GT_BSD_USER_INTERRUPT; 1572 I915_WRITE(GTIER, render_irqs); 1573 POSTING_READ(GTIER); 1574 1575 if (HAS_PCH_CPT(dev)) { 1576 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1577 SDE_PORTB_HOTPLUG_CPT | 1578 SDE_PORTC_HOTPLUG_CPT | 1579 SDE_PORTD_HOTPLUG_CPT); 1580 } else { 1581 hotplug_mask = (SDE_CRT_HOTPLUG | 1582 SDE_PORTB_HOTPLUG | 1583 
SDE_PORTC_HOTPLUG | 1584 SDE_PORTD_HOTPLUG | 1585 SDE_AUX_MASK); 1586 } 1587 1588 dev_priv->pch_irq_mask = ~hotplug_mask; 1589 1590 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1591 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1592 I915_WRITE(SDEIER, hotplug_mask); 1593 POSTING_READ(SDEIER); 1594 1595 ironlake_enable_pch_hotplug(dev); 1596 1597 if (IS_IRONLAKE_M(dev)) { 1598 /* Clear & enable PCU event interrupts */ 1599 I915_WRITE(DEIIR, DE_PCU_EVENT); 1600 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); 1601 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 1602 } 1603 1604 return 0; 1605 } 1606 1607 static int 1608 ivybridge_irq_postinstall(struct drm_device *dev) 1609 { 1610 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1611 /* enable kind of interrupts always enabled */ 1612 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | 1613 DE_PCH_EVENT_IVB | DE_PLANEA_FLIP_DONE_IVB | 1614 DE_PLANEB_FLIP_DONE_IVB; 1615 u32 render_irqs; 1616 u32 hotplug_mask; 1617 1618 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1619 dev_priv->irq_mask = ~display_mask; 1620 1621 /* should always can generate irq */ 1622 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1623 I915_WRITE(DEIMR, dev_priv->irq_mask); 1624 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK_IVB | 1625 DE_PIPEB_VBLANK_IVB); 1626 POSTING_READ(DEIER); 1627 1628 dev_priv->gt_irq_mask = ~0; 1629 1630 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1631 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 1632 1633 render_irqs = GT_USER_INTERRUPT | GT_GEN6_BSD_USER_INTERRUPT | 1634 GT_GEN6_BLT_USER_INTERRUPT; 1635 I915_WRITE(GTIER, render_irqs); 1636 POSTING_READ(GTIER); 1637 1638 hotplug_mask = (SDE_CRT_HOTPLUG_CPT | 1639 SDE_PORTB_HOTPLUG_CPT | 1640 SDE_PORTC_HOTPLUG_CPT | 1641 SDE_PORTD_HOTPLUG_CPT); 1642 dev_priv->pch_irq_mask = ~hotplug_mask; 1643 1644 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1645 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); 1646 I915_WRITE(SDEIER, hotplug_mask); 1647 
POSTING_READ(SDEIER); 1648 1649 ironlake_enable_pch_hotplug(dev); 1650 1651 return 0; 1652 } 1653 1654 static void 1655 i915_driver_irq_preinstall(struct drm_device * dev) 1656 { 1657 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1658 int pipe; 1659 1660 atomic_set(&dev_priv->irq_received, 0); 1661 1662 if (I915_HAS_HOTPLUG(dev)) { 1663 I915_WRITE(PORT_HOTPLUG_EN, 0); 1664 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1665 } 1666 1667 I915_WRITE(HWSTAM, 0xeffe); 1668 for_each_pipe(pipe) 1669 I915_WRITE(PIPESTAT(pipe), 0); 1670 I915_WRITE(IMR, 0xffffffff); 1671 I915_WRITE(IER, 0x0); 1672 POSTING_READ(IER); 1673 } 1674 1675 /* 1676 * Must be called after intel_modeset_init or hotplug interrupts won't be 1677 * enabled correctly. 1678 */ 1679 static int 1680 i915_driver_irq_postinstall(struct drm_device *dev) 1681 { 1682 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1683 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1684 u32 error_mask; 1685 1686 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1687 1688 /* Unmask the interrupts that we always want on. */ 1689 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; 1690 1691 dev_priv->pipestat[0] = 0; 1692 dev_priv->pipestat[1] = 0; 1693 1694 if (I915_HAS_HOTPLUG(dev)) { 1695 /* Enable in IER... */ 1696 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1697 /* and unmask in IMR */ 1698 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; 1699 } 1700 1701 /* 1702 * Enable some error detection, note the instruction error mask 1703 * bit is reserved, so we leave it masked. 
1704 */ 1705 if (IS_G4X(dev)) { 1706 error_mask = ~(GM45_ERROR_PAGE_TABLE | 1707 GM45_ERROR_MEM_PRIV | 1708 GM45_ERROR_CP_PRIV | 1709 I915_ERROR_MEMORY_REFRESH); 1710 } else { 1711 error_mask = ~(I915_ERROR_PAGE_TABLE | 1712 I915_ERROR_MEMORY_REFRESH); 1713 } 1714 I915_WRITE(EMR, error_mask); 1715 1716 I915_WRITE(IMR, dev_priv->irq_mask); 1717 I915_WRITE(IER, enable_mask); 1718 POSTING_READ(IER); 1719 1720 if (I915_HAS_HOTPLUG(dev)) { 1721 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1722 1723 /* Note HDMI and DP share bits */ 1724 if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) 1725 hotplug_en |= HDMIB_HOTPLUG_INT_EN; 1726 if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) 1727 hotplug_en |= HDMIC_HOTPLUG_INT_EN; 1728 if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) 1729 hotplug_en |= HDMID_HOTPLUG_INT_EN; 1730 if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) 1731 hotplug_en |= SDVOC_HOTPLUG_INT_EN; 1732 if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) 1733 hotplug_en |= SDVOB_HOTPLUG_INT_EN; 1734 if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { 1735 hotplug_en |= CRT_HOTPLUG_INT_EN; 1736 1737 /* Programming the CRT detection parameters tends 1738 to generate a spurious hotplug event about three 1739 seconds later. So just do it once. 
1740 */ 1741 if (IS_G4X(dev)) 1742 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 1743 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 1744 } 1745 1746 /* Ignore TV since it's buggy */ 1747 1748 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); 1749 } 1750 1751 #if 1 1752 KIB_NOTYET(); 1753 #else 1754 intel_opregion_enable_asle(dev); 1755 #endif 1756 1757 return 0; 1758 } 1759 1760 static void 1761 ironlake_irq_uninstall(struct drm_device *dev) 1762 { 1763 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1764 1765 if (dev_priv == NULL) 1766 return; 1767 1768 dev_priv->vblank_pipe = 0; 1769 1770 I915_WRITE(HWSTAM, 0xffffffff); 1771 1772 I915_WRITE(DEIMR, 0xffffffff); 1773 I915_WRITE(DEIER, 0x0); 1774 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1775 1776 I915_WRITE(GTIMR, 0xffffffff); 1777 I915_WRITE(GTIER, 0x0); 1778 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1779 1780 I915_WRITE(SDEIMR, 0xffffffff); 1781 I915_WRITE(SDEIER, 0x0); 1782 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1783 } 1784 1785 static void i915_driver_irq_uninstall(struct drm_device * dev) 1786 { 1787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1788 int pipe; 1789 1790 if (!dev_priv) 1791 return; 1792 1793 dev_priv->vblank_pipe = 0; 1794 1795 if (I915_HAS_HOTPLUG(dev)) { 1796 I915_WRITE(PORT_HOTPLUG_EN, 0); 1797 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 1798 } 1799 1800 I915_WRITE(HWSTAM, 0xffffffff); 1801 for_each_pipe(pipe) 1802 I915_WRITE(PIPESTAT(pipe), 0); 1803 I915_WRITE(IMR, 0xffffffff); 1804 I915_WRITE(IER, 0x0); 1805 1806 for_each_pipe(pipe) 1807 I915_WRITE(PIPESTAT(pipe), 1808 I915_READ(PIPESTAT(pipe)) & 0x8000ffff); 1809 I915_WRITE(IIR, I915_READ(IIR)); 1810 } 1811 1812 void 1813 intel_irq_init(struct drm_device *dev) 1814 { 1815 struct drm_i915_private *dev_priv = dev->dev_private; 1816 1817 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 1818 INIT_WORK(&dev_priv->error_work, i915_error_work_func); 1819 INIT_WORK(&dev_priv->rps.work, 
gen6_pm_rps_work); 1820 1821 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1822 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 1823 if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 1824 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 1825 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 1826 } 1827 1828 if (drm_core_check_feature(dev, DRIVER_MODESET)) 1829 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; 1830 else 1831 dev->driver->get_vblank_timestamp = NULL; 1832 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 1833 1834 if (IS_IVYBRIDGE(dev)) { 1835 /* Share pre & uninstall handlers with ILK/SNB */ 1836 dev->driver->irq_handler = ivybridge_irq_handler; 1837 dev->driver->irq_preinstall = ironlake_irq_preinstall; 1838 dev->driver->irq_postinstall = ivybridge_irq_postinstall; 1839 dev->driver->irq_uninstall = ironlake_irq_uninstall; 1840 dev->driver->enable_vblank = ivybridge_enable_vblank; 1841 dev->driver->disable_vblank = ivybridge_disable_vblank; 1842 } else if (HAS_PCH_SPLIT(dev)) { 1843 dev->driver->irq_handler = ironlake_irq_handler; 1844 dev->driver->irq_preinstall = ironlake_irq_preinstall; 1845 dev->driver->irq_postinstall = ironlake_irq_postinstall; 1846 dev->driver->irq_uninstall = ironlake_irq_uninstall; 1847 dev->driver->enable_vblank = ironlake_enable_vblank; 1848 dev->driver->disable_vblank = ironlake_disable_vblank; 1849 } else { 1850 dev->driver->irq_preinstall = i915_driver_irq_preinstall; 1851 dev->driver->irq_postinstall = i915_driver_irq_postinstall; 1852 dev->driver->irq_uninstall = i915_driver_irq_uninstall; 1853 dev->driver->irq_handler = i915_driver_irq_handler; 1854 dev->driver->enable_vblank = i915_enable_vblank; 1855 dev->driver->disable_vblank = i915_disable_vblank; 1856 } 1857 } 1858 1859 static struct drm_i915_error_object * 1860 i915_error_object_create(struct drm_i915_private *dev_priv, 1861 struct drm_i915_gem_object *src) 
1862 { 1863 struct drm_i915_error_object *dst; 1864 struct sf_buf *sf; 1865 void *d, *s; 1866 int page, page_count; 1867 u32 reloc_offset; 1868 1869 if (src == NULL || src->pages == NULL) 1870 return NULL; 1871 1872 page_count = src->base.size / PAGE_SIZE; 1873 1874 dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), DRM_I915_GEM, 1875 M_NOWAIT); 1876 if (dst == NULL) 1877 return (NULL); 1878 1879 reloc_offset = src->gtt_offset; 1880 for (page = 0; page < page_count; page++) { 1881 d = kmalloc(PAGE_SIZE, DRM_I915_GEM, M_NOWAIT); 1882 if (d == NULL) 1883 goto unwind; 1884 1885 if (reloc_offset < dev_priv->mm.gtt_mappable_end) { 1886 /* Simply ignore tiling or any overlapping fence. 1887 * It's part of the error state, and this hopefully 1888 * captures what the GPU read. 1889 */ 1890 s = pmap_mapdev_attr(src->base.dev->agp->base + 1891 reloc_offset, PAGE_SIZE, PAT_WRITE_COMBINING); 1892 memcpy(d, s, PAGE_SIZE); 1893 pmap_unmapdev((vm_offset_t)s, PAGE_SIZE); 1894 } else { 1895 drm_clflush_pages(&src->pages[page], 1); 1896 1897 sf = sf_buf_alloc(src->pages[page]); 1898 if (sf != NULL) { 1899 s = (void *)(uintptr_t)sf_buf_kva(sf); 1900 memcpy(d, s, PAGE_SIZE); 1901 sf_buf_free(sf); 1902 } else { 1903 bzero(d, PAGE_SIZE); 1904 strcpy(d, "XXXKIB"); 1905 } 1906 1907 drm_clflush_pages(&src->pages[page], 1); 1908 } 1909 1910 dst->pages[page] = d; 1911 1912 reloc_offset += PAGE_SIZE; 1913 } 1914 dst->page_count = page_count; 1915 dst->gtt_offset = src->gtt_offset; 1916 1917 return (dst); 1918 1919 unwind: 1920 while (page--) 1921 drm_free(dst->pages[page], DRM_I915_GEM); 1922 drm_free(dst, DRM_I915_GEM); 1923 return (NULL); 1924 } 1925 1926 static void 1927 i915_error_object_free(struct drm_i915_error_object *obj) 1928 { 1929 int page; 1930 1931 if (obj == NULL) 1932 return; 1933 1934 for (page = 0; page < obj->page_count; page++) 1935 drm_free(obj->pages[page], DRM_I915_GEM); 1936 1937 drm_free(obj, DRM_I915_GEM); 1938 } 1939 1940 static void 1941 
i915_error_state_free(struct drm_device *dev, 1942 struct drm_i915_error_state *error) 1943 { 1944 int i; 1945 1946 for (i = 0; i < DRM_ARRAY_SIZE(error->ring); i++) { 1947 i915_error_object_free(error->ring[i].batchbuffer); 1948 i915_error_object_free(error->ring[i].ringbuffer); 1949 drm_free(error->ring[i].requests, DRM_I915_GEM); 1950 } 1951 1952 drm_free(error->active_bo, DRM_I915_GEM); 1953 drm_free(error->overlay, DRM_I915_GEM); 1954 drm_free(error, DRM_I915_GEM); 1955 } 1956 1957 static u32 1958 capture_bo_list(struct drm_i915_error_buffer *err, int count, 1959 struct list_head *head) 1960 { 1961 struct drm_i915_gem_object *obj; 1962 int i = 0; 1963 1964 list_for_each_entry(obj, head, mm_list) { 1965 err->size = obj->base.size; 1966 err->name = obj->base.name; 1967 err->seqno = obj->last_rendering_seqno; 1968 err->gtt_offset = obj->gtt_offset; 1969 err->read_domains = obj->base.read_domains; 1970 err->write_domain = obj->base.write_domain; 1971 err->fence_reg = obj->fence_reg; 1972 err->pinned = 0; 1973 if (obj->pin_count > 0) 1974 err->pinned = 1; 1975 if (obj->user_pin_count > 0) 1976 err->pinned = -1; 1977 err->tiling = obj->tiling_mode; 1978 err->dirty = obj->dirty; 1979 err->purgeable = obj->madv != I915_MADV_WILLNEED; 1980 err->ring = obj->ring ? 
obj->ring->id : -1; 1981 err->cache_level = obj->cache_level; 1982 1983 if (++i == count) 1984 break; 1985 1986 err++; 1987 } 1988 1989 return (i); 1990 } 1991 1992 static void 1993 i915_gem_record_fences(struct drm_device *dev, 1994 struct drm_i915_error_state *error) 1995 { 1996 struct drm_i915_private *dev_priv = dev->dev_private; 1997 int i; 1998 1999 /* Fences */ 2000 switch (INTEL_INFO(dev)->gen) { 2001 case 7: 2002 case 6: 2003 for (i = 0; i < 16; i++) 2004 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 2005 break; 2006 case 5: 2007 case 4: 2008 for (i = 0; i < 16; i++) 2009 error->fence[i] = I915_READ64(FENCE_REG_965_0 + 2010 (i * 8)); 2011 break; 2012 case 3: 2013 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 2014 for (i = 0; i < 8; i++) 2015 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + 2016 (i * 4)); 2017 case 2: 2018 for (i = 0; i < 8; i++) 2019 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 2020 break; 2021 2022 } 2023 } 2024 2025 static struct drm_i915_error_object * 2026 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, 2027 struct intel_ring_buffer *ring) 2028 { 2029 struct drm_i915_gem_object *obj; 2030 u32 seqno; 2031 2032 if (!ring->get_seqno) 2033 return (NULL); 2034 2035 seqno = ring->get_seqno(ring); 2036 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { 2037 if (obj->ring != ring) 2038 continue; 2039 2040 if (i915_seqno_passed(seqno, obj->last_rendering_seqno)) 2041 continue; 2042 2043 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) 2044 continue; 2045 2046 /* We need to copy these to an anonymous buffer as the simplest 2047 * method to avoid being overwritten by userspace. 
2048 */ 2049 return (i915_error_object_create(dev_priv, obj)); 2050 } 2051 2052 return NULL; 2053 } 2054 2055 static void 2056 i915_record_ring_state(struct drm_device *dev, 2057 struct drm_i915_error_state *error, 2058 struct intel_ring_buffer *ring) 2059 { 2060 struct drm_i915_private *dev_priv = dev->dev_private; 2061 2062 if (INTEL_INFO(dev)->gen >= 6) { 2063 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); 2064 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); 2065 error->semaphore_mboxes[ring->id][0] 2066 = I915_READ(RING_SYNC_0(ring->mmio_base)); 2067 error->semaphore_mboxes[ring->id][1] 2068 = I915_READ(RING_SYNC_1(ring->mmio_base)); 2069 } 2070 2071 if (INTEL_INFO(dev)->gen >= 4) { 2072 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); 2073 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); 2074 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); 2075 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); 2076 if (ring->id == RCS) { 2077 error->instdone1 = I915_READ(INSTDONE1); 2078 error->bbaddr = I915_READ64(BB_ADDR); 2079 } 2080 } else { 2081 error->ipeir[ring->id] = I915_READ(IPEIR); 2082 error->ipehr[ring->id] = I915_READ(IPEHR); 2083 error->instdone[ring->id] = I915_READ(INSTDONE); 2084 } 2085 2086 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); 2087 error->seqno[ring->id] = ring->get_seqno(ring); 2088 error->acthd[ring->id] = intel_ring_get_active_head(ring); 2089 error->head[ring->id] = I915_READ_HEAD(ring); 2090 error->tail[ring->id] = I915_READ_TAIL(ring); 2091 2092 error->cpu_ring_head[ring->id] = ring->head; 2093 error->cpu_ring_tail[ring->id] = ring->tail; 2094 } 2095 2096 static void 2097 i915_gem_record_rings(struct drm_device *dev, 2098 struct drm_i915_error_state *error) 2099 { 2100 struct drm_i915_private *dev_priv = dev->dev_private; 2101 struct drm_i915_gem_request *request; 2102 int i, count; 2103 2104 for (i = 0; i < 
I915_NUM_RINGS; i++) { 2105 struct intel_ring_buffer *ring = &dev_priv->ring[i]; 2106 2107 if (ring->obj == NULL) 2108 continue; 2109 2110 i915_record_ring_state(dev, error, ring); 2111 2112 error->ring[i].batchbuffer = 2113 i915_error_first_batchbuffer(dev_priv, ring); 2114 2115 error->ring[i].ringbuffer = 2116 i915_error_object_create(dev_priv, ring->obj); 2117 2118 count = 0; 2119 list_for_each_entry(request, &ring->request_list, list) 2120 count++; 2121 2122 error->ring[i].num_requests = count; 2123 error->ring[i].requests = kmalloc(count * 2124 sizeof(struct drm_i915_error_request), DRM_I915_GEM, 2125 M_WAITOK); 2126 if (error->ring[i].requests == NULL) { 2127 error->ring[i].num_requests = 0; 2128 continue; 2129 } 2130 2131 count = 0; 2132 list_for_each_entry(request, &ring->request_list, list) { 2133 struct drm_i915_error_request *erq; 2134 2135 erq = &error->ring[i].requests[count++]; 2136 erq->seqno = request->seqno; 2137 erq->jiffies = request->emitted_jiffies; 2138 erq->tail = request->tail; 2139 } 2140 } 2141 } 2142 2143 static void 2144 i915_capture_error_state(struct drm_device *dev) 2145 { 2146 struct drm_i915_private *dev_priv = dev->dev_private; 2147 struct drm_i915_gem_object *obj; 2148 struct drm_i915_error_state *error; 2149 int i, pipe; 2150 2151 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); 2152 error = dev_priv->first_error; 2153 lockmgr(&dev_priv->error_lock, LK_RELEASE); 2154 if (error != NULL) 2155 return; 2156 2157 /* Account for pipe specific data like PIPE*STAT */ 2158 error = kmalloc(sizeof(*error), DRM_I915_GEM, M_NOWAIT | M_ZERO); 2159 if (error == NULL) { 2160 DRM_DEBUG("out of memory, not capturing error state\n"); 2161 return; 2162 } 2163 2164 DRM_INFO("capturing error event; look for more information in " 2165 "sysctl hw.dri.%d.info.i915_error_state\n", dev->sysctl_node_idx); 2166 2167 error->eir = I915_READ(EIR); 2168 error->pgtbl_er = I915_READ(PGTBL_ER); 2169 for_each_pipe(pipe) 2170 error->pipestat[pipe] = 
I915_READ(PIPESTAT(pipe)); 2171 2172 if (INTEL_INFO(dev)->gen >= 6) { 2173 error->error = I915_READ(ERROR_GEN6); 2174 error->done_reg = I915_READ(DONE_REG); 2175 } 2176 2177 i915_gem_record_fences(dev, error); 2178 i915_gem_record_rings(dev, error); 2179 2180 /* Record buffers on the active and pinned lists. */ 2181 error->active_bo = NULL; 2182 error->pinned_bo = NULL; 2183 2184 i = 0; 2185 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) 2186 i++; 2187 error->active_bo_count = i; 2188 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list) 2189 i++; 2190 error->pinned_bo_count = i - error->active_bo_count; 2191 2192 error->active_bo = NULL; 2193 error->pinned_bo = NULL; 2194 if (i) { 2195 error->active_bo = kmalloc(sizeof(*error->active_bo) * i, 2196 DRM_I915_GEM, M_NOWAIT); 2197 if (error->active_bo) 2198 error->pinned_bo = error->active_bo + 2199 error->active_bo_count; 2200 } 2201 2202 if (error->active_bo) 2203 error->active_bo_count = capture_bo_list(error->active_bo, 2204 error->active_bo_count, &dev_priv->mm.active_list); 2205 2206 if (error->pinned_bo) 2207 error->pinned_bo_count = capture_bo_list(error->pinned_bo, 2208 error->pinned_bo_count, &dev_priv->mm.pinned_list); 2209 2210 microtime(&error->time); 2211 2212 error->overlay = intel_overlay_capture_error_state(dev); 2213 error->display = intel_display_capture_error_state(dev); 2214 2215 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); 2216 if (dev_priv->first_error == NULL) { 2217 dev_priv->first_error = error; 2218 error = NULL; 2219 } 2220 lockmgr(&dev_priv->error_lock, LK_RELEASE); 2221 2222 if (error != NULL) 2223 i915_error_state_free(dev, error); 2224 } 2225 2226 void 2227 i915_destroy_error_state(struct drm_device *dev) 2228 { 2229 struct drm_i915_private *dev_priv = dev->dev_private; 2230 struct drm_i915_error_state *error; 2231 2232 lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE); 2233 error = dev_priv->first_error; 2234 dev_priv->first_error = NULL; 2235 
lockmgr(&dev_priv->error_lock, LK_RELEASE); 2236 2237 if (error != NULL) 2238 i915_error_state_free(dev, error); 2239 } 2240