xref: /dragonfly/sys/dev/drm/i915/i915_irq.c (revision d37f73b6)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "intel_drv.h"
33 
34 /* For display hotplug interrupt */
35 static void
36 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
37 {
38 	if ((dev_priv->irq_mask & mask) != 0) {
39 		dev_priv->irq_mask &= ~mask;
40 		I915_WRITE(DEIMR, dev_priv->irq_mask);
41 		POSTING_READ(DEIMR);
42 	}
43 }
44 
45 static inline void
46 ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
47 {
48 	if ((dev_priv->irq_mask & mask) != mask) {
49 		dev_priv->irq_mask |= mask;
50 		I915_WRITE(DEIMR, dev_priv->irq_mask);
51 		POSTING_READ(DEIMR);
52 	}
53 }
54 
55 void
56 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
57 {
58 	if ((dev_priv->pipestat[pipe] & mask) != mask) {
59 		u32 reg = PIPESTAT(pipe);
60 
61 		dev_priv->pipestat[pipe] |= mask;
62 		/* Enable the interrupt, clear any pending status */
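		/*
		 * PIPESTAT keeps the enable bits in the upper 16 bits and the
		 * matching write-1-to-clear status bits in the lower 16 (e.g.
		 * PIPE_VBLANK_INTERRUPT_ENABLE is bit 17, its status bit is
		 * bit 1), so "mask >> 16" also acks any status that was
		 * already pending for the events being enabled.
		 */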
63 		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
64 		POSTING_READ(reg);
65 	}
66 }
67 
68 void
69 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
70 {
71 	if ((dev_priv->pipestat[pipe] & mask) != 0) {
72 		u32 reg = PIPESTAT(pipe);
73 
74 		dev_priv->pipestat[pipe] &= ~mask;
75 		I915_WRITE(reg, dev_priv->pipestat[pipe]);
76 		POSTING_READ(reg);
77 	}
78 }
79 
80 /**
81  * intel_enable_asle - enable ASLE interrupt for OpRegion
82  */
83 void intel_enable_asle(struct drm_device *dev)
84 {
85 	drm_i915_private_t *dev_priv = dev->dev_private;
86 
87 	/* FIXME: opregion/asle for VLV */
88 	if (IS_VALLEYVIEW(dev))
89 		return;
90 
91 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
92 
93 	if (HAS_PCH_SPLIT(dev))
94 		ironlake_enable_display_irq(dev_priv, DE_GSE);
95 	else {
96 		i915_enable_pipestat(dev_priv, 1,
97 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
98 		if (INTEL_INFO(dev)->gen >= 4)
99 			i915_enable_pipestat(dev_priv, 0,
100 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
101 	}
102 
103 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
104 }
105 
106 /**
107  * i915_pipe_enabled - check if a pipe is enabled
108  * @dev: DRM device
109  * @pipe: pipe to check
110  *
111  * Reading certain registers when the pipe is disabled can hang the chip.
112  * Use this routine to make sure the PLL is running and the pipe is active
113  * before reading such registers if unsure.
114  */
115 static int
116 i915_pipe_enabled(struct drm_device *dev, int pipe)
117 {
118 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
119 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
120 								      pipe);
121 
122 	return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
123 }
124 
125 /* Called from drm generic code, passed a 'crtc', which
126  * we use as a pipe index
127  */
128 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
129 {
130 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
131 	unsigned long high_frame;
132 	unsigned long low_frame;
133 	u32 high1, high2, low;
134 
135 	if (!i915_pipe_enabled(dev, pipe)) {
136 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
137 				"pipe %c\n", pipe_name(pipe));
138 		return 0;
139 	}
140 
141 	high_frame = PIPEFRAME(pipe);
142 	low_frame = PIPEFRAMEPIXEL(pipe);
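	/*
	 * PIPEFRAME holds the high bits of the frame counter, while
	 * PIPEFRAMEPIXEL packs the low 8 bits of the frame counter into
	 * bits 31:24 with the pixel-within-frame count below them; the
	 * masks and shifts below pick those fields apart.
	 */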
143 
144 	/*
145 	 * High & low register fields aren't synchronized, so make sure
146 	 * we get a low value that's stable across two reads of the high
147 	 * register.
148 	 */
149 	do {
150 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
151 		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
152 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
153 	} while (high1 != high2);
154 
155 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
156 	low >>= PIPE_FRAME_LOW_SHIFT;
157 	return (high1 << 8) | low;
158 }
159 
160 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
161 {
162 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
163 	int reg = PIPE_FRMCOUNT_GM45(pipe);
164 
165 	if (!i915_pipe_enabled(dev, pipe)) {
166 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
167 				 "pipe %c\n", pipe_name(pipe));
168 		return 0;
169 	}
170 
171 	return I915_READ(reg);
172 }
173 
174 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
175 			     int *vpos, int *hpos)
176 {
177 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
178 	u32 vbl = 0, position = 0;
179 	int vbl_start, vbl_end, htotal, vtotal;
180 	bool in_vbl = true;
181 	int ret = 0;
182 	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
183 								      pipe);
184 
185 	if (!i915_pipe_enabled(dev, pipe)) {
186 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
187 				 "pipe %c\n", pipe_name(pipe));
188 		return 0;
189 	}
190 
191 	/* Get vtotal. */
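	/*
	 * The timing registers store line counts minus one: bits 28:16 of
	 * VTOTAL hold (vertical total - 1), hence the "1 +" below.
	 */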
192 	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
193 
194 	if (INTEL_INFO(dev)->gen >= 4) {
195 		/* No obvious pixelcount register. Only query vertical
196 		 * scanout position from Display scan line register.
197 		 */
198 		position = I915_READ(PIPEDSL(pipe));
199 
200 		/* Decode into vertical scanout position. Don't have
201 		 * horizontal scanout position.
202 		 */
203 		*vpos = position & 0x1fff;
204 		*hpos = 0;
205 	} else {
206 		/* Have access to pixelcount since start of frame.
207 		 * We can split this into vertical and horizontal
208 		 * scanout position.
209 		 */
210 		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
211 
212 		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
213 		*vpos = position / htotal;
214 		*hpos = position - (*vpos * htotal);
215 	}
216 
217 	/* Query vblank area. */
218 	vbl = I915_READ(VBLANK(cpu_transcoder));
219 
220 	/* Test position against vblank region. */
221 	vbl_start = vbl & 0x1fff;
222 	vbl_end = (vbl >> 16) & 0x1fff;
223 
224 	if ((*vpos < vbl_start) || (*vpos > vbl_end))
225 		in_vbl = false;
226 
227 	/* Inside "upper part" of vblank area? Apply corrective offset: */
228 	if (in_vbl && (*vpos >= vbl_start))
229 		*vpos = *vpos - vtotal;
230 
231 	/* Readouts valid? */
232 	if (vbl > 0)
233 		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
234 
235 	/* In vblank? */
236 	if (in_vbl)
237 		ret |= DRM_SCANOUTPOS_INVBL;
238 
239 	return ret;
240 }
241 
242 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
243 			      int *max_error,
244 			      struct timeval *vblank_time,
245 			      unsigned flags)
246 {
247 	struct drm_i915_private *dev_priv = dev->dev_private;
248 	struct drm_crtc *crtc;
249 
250 	if (pipe < 0 || pipe >= dev_priv->num_pipe) {
251 		DRM_ERROR("Invalid crtc %d\n", pipe);
252 		return -EINVAL;
253 	}
254 
255 	/* Get drm_crtc to timestamp: */
256 	crtc = intel_get_crtc_for_pipe(dev, pipe);
257 	if (crtc == NULL) {
258 		DRM_ERROR("Invalid crtc %d\n", pipe);
259 		return -EINVAL;
260 	}
261 
262 	if (!crtc->enabled) {
263 		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
264 		return -EBUSY;
265 	}
266 
267 	/* Helper routine in DRM core does all the work: */
268 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
269 						     vblank_time, flags,
270 						     crtc);
271 }
272 
273 /*
274  * Handle hotplug events outside the interrupt handler proper.
275  */
276 static void i915_hotplug_work_func(struct work_struct *work)
277 {
278 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
279 						    hotplug_work);
280 	struct drm_device *dev = dev_priv->dev;
281 	struct drm_mode_config *mode_config = &dev->mode_config;
282 	struct intel_encoder *encoder;
283 
284 	lockmgr(&mode_config->mutex, LK_EXCLUSIVE);
285 	DRM_DEBUG_KMS("running encoder hotplug functions\n");
286 
287 	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
288 		if (encoder->hot_plug)
289 			encoder->hot_plug(encoder);
290 
291 	lockmgr(&mode_config->mutex, LK_RELEASE);
292 
293 	/* Just fire off a uevent and let userspace tell us what to do */
294 	drm_helper_hpd_irq_event(dev);
295 }
296 
297 /* defined in intel_pm.c */
298 extern struct lock mchdev_lock;
299 
300 static void ironlake_handle_rps_change(struct drm_device *dev)
301 {
302 	drm_i915_private_t *dev_priv = dev->dev_private;
303 	u32 busy_up, busy_down, max_avg, min_avg;
304 	u8 new_delay;
305 
306 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
307 
308 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
309 
310 	new_delay = dev_priv->ips.cur_delay;
311 
312 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
313 	busy_up = I915_READ(RCPREVBSYTUPAVG);
314 	busy_down = I915_READ(RCPREVBSYTDNAVG);
315 	max_avg = I915_READ(RCBMAXAVG);
316 	min_avg = I915_READ(RCBMINAVG);
317 
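	/*
	 * NB: the ips "delay" values run opposite to frequency - a smaller
	 * delay means a higher frequency - so ips.max_delay is numerically
	 * the smallest value cur_delay may take and ips.min_delay the
	 * largest, which is why the clamps below look inverted.
	 */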
318 	/* Handle RCS change request from hw */
319 	if (busy_up > max_avg) {
320 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
321 			new_delay = dev_priv->ips.cur_delay - 1;
322 		if (new_delay < dev_priv->ips.max_delay)
323 			new_delay = dev_priv->ips.max_delay;
324 	} else if (busy_down < min_avg) {
325 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
326 			new_delay = dev_priv->ips.cur_delay + 1;
327 		if (new_delay > dev_priv->ips.min_delay)
328 			new_delay = dev_priv->ips.min_delay;
329 	}
330 
331 	if (ironlake_set_drps(dev, new_delay))
332 		dev_priv->ips.cur_delay = new_delay;
333 
334 	lockmgr(&mchdev_lock, LK_RELEASE);
335 
336 	return;
337 }
338 
339 static void notify_ring(struct drm_device *dev,
340 			struct intel_ring_buffer *ring)
341 {
342 	struct drm_i915_private *dev_priv = dev->dev_private;
343 
344 	if (ring->obj == NULL)
345 		return;
346 
347 	wake_up_all(&ring->irq_queue);
348 	if (i915_enable_hangcheck) {
349 		dev_priv->hangcheck_count = 0;
350 		mod_timer(&dev_priv->hangcheck_timer,
351 			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
352 	}
353 }
354 
355 static void gen6_pm_rps_work(struct work_struct *work)
356 {
357 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
358 						    rps.work);
359 	u32 pm_iir, pm_imr;
360 	u8 new_delay;
361 
362 	spin_lock(&dev_priv->rps.lock);
363 	pm_iir = dev_priv->rps.pm_iir;
364 	dev_priv->rps.pm_iir = 0;
365 	pm_imr = I915_READ(GEN6_PMIMR);
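	/*
	 * The interrupt handler masked these events in IMR when it queued
	 * this work (see gen6_queue_rps_work()); now that pm_iir has been
	 * harvested, writing 0 unmasks them again.
	 */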
366 	I915_WRITE(GEN6_PMIMR, 0);
367 	spin_unlock(&dev_priv->rps.lock);
368 
369 	if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
370 		return;
371 
372 	lockmgr(&dev_priv->rps.hw_lock, LK_EXCLUSIVE);
373 
374 	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
375 		new_delay = dev_priv->rps.cur_delay + 1;
376 	else
377 		new_delay = dev_priv->rps.cur_delay - 1;
378 
379 	/* sysfs frequency interfaces may have snuck in while servicing the
380 	 * interrupt
381 	 */
382 	if (!(new_delay > dev_priv->rps.max_delay ||
383 	      new_delay < dev_priv->rps.min_delay)) {
384 		gen6_set_rps(dev_priv->dev, new_delay);
385 	}
386 
387 	lockmgr(&dev_priv->rps.hw_lock, LK_RELEASE);
388 }
389 
390 
391 /**
392  * ivybridge_parity_work - Workqueue handler for a parity error interrupt
393  * @work: workqueue struct
394  *
395  * Doesn't actually do anything except notify userspace. As a consequence of
396  * this event, userspace should try to remap the bad rows, since statistically
397  * the same row is likely to go bad again.
399  */
400 static void ivybridge_parity_work(struct work_struct *work)
401 {
402 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
403 						    l3_parity.error_work);
404 	u32 error_status, row, bank, subbank;
405 	uint32_t misccpctl;
406 
407 	/* We must turn off DOP level clock gating to access the L3 registers.
408 	 * In order to prevent a get/put style interface, acquire struct mutex
409 	 * any time we access those registers.
410 	 */
411 	DRM_LOCK(dev_priv->dev);
412 
413 	misccpctl = I915_READ(GEN7_MISCCPCTL);
414 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
415 	POSTING_READ(GEN7_MISCCPCTL);
416 
417 	error_status = I915_READ(GEN7_L3CDERRST1);
418 	row = GEN7_PARITY_ERROR_ROW(error_status);
419 	bank = GEN7_PARITY_ERROR_BANK(error_status);
420 	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
421 
422 	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
423 				    GEN7_L3CDERRST1_ENABLE);
424 	POSTING_READ(GEN7_L3CDERRST1);
425 
426 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
427 
428 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
429 	dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
430 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
431 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
432 
433 	DRM_UNLOCK(dev_priv->dev);
434 
435 	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
436 		  row, bank, subbank);
437 }
438 
439 static void ivybridge_handle_parity_error(struct drm_device *dev)
440 {
441 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
442 
443 	if (!HAS_L3_GPU_CACHE(dev))
444 		return;
445 
446 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
447 	dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
448 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
449 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
450 
451 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
452 }
453 
454 static void snb_gt_irq_handler(struct drm_device *dev,
455 			       struct drm_i915_private *dev_priv,
456 			       u32 gt_iir)
457 {
458 
459 	if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
460 		      GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
461 		notify_ring(dev, &dev_priv->ring[RCS]);
462 	if (gt_iir & GEN6_BSD_USER_INTERRUPT)
463 		notify_ring(dev, &dev_priv->ring[VCS]);
464 	if (gt_iir & GEN6_BLITTER_USER_INTERRUPT)
465 		notify_ring(dev, &dev_priv->ring[BCS]);
466 
467 	if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT |
468 		      GT_GEN6_BSD_CS_ERROR_INTERRUPT |
469 		      GT_RENDER_CS_ERROR_INTERRUPT)) {
470 		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
471 		i915_handle_error(dev, false);
472 	}
473 
474 	if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT)
475 		ivybridge_handle_parity_error(dev);
476 }
477 
478 static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
479 				u32 pm_iir)
480 {
481 
482 	/*
483 	 * IIR bits should never already be set here, because IMR should
484 	 * prevent an interrupt from being shown in IIR. A bit already set
485 	 * would mean we had unsafely cleared dev_priv->rps.pm_iir somewhere;
486 	 * missing an interrupt of the same type is not itself a problem,
487 	 * but it would point to a logic error.
488 	 *
489 	 * The mask bit in IMR is cleared by dev_priv->rps.work.
490 	 */
491 
492 	spin_lock(&dev_priv->rps.lock);
493 	dev_priv->rps.pm_iir |= pm_iir;
494 	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
495 	POSTING_READ(GEN6_PMIMR);
496 	spin_unlock(&dev_priv->rps.lock);
497 
498 	queue_work(dev_priv->wq, &dev_priv->rps.work);
499 }
500 
501 static irqreturn_t valleyview_irq_handler(void *arg)
502 {
503 	struct drm_device *dev = (struct drm_device *) arg;
504 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
505 	u32 iir, gt_iir, pm_iir;
506 	int pipe;
507 	u32 pipe_stats[I915_MAX_PIPES];
508 	bool blc_event = false;
509 
510 	atomic_inc(&dev_priv->irq_received);
511 
512 	while (true) {
513 		iir = I915_READ(VLV_IIR);
514 		gt_iir = I915_READ(GTIIR);
515 		pm_iir = I915_READ(GEN6_PMIIR);
516 
517 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
518 			goto out;
519 
520 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
521 
522 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
523 		for_each_pipe(pipe) {
524 			int reg = PIPESTAT(pipe);
525 			pipe_stats[pipe] = I915_READ(reg);
526 
527 			/*
528 			 * Clear the PIPE*STAT regs before the IIR
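			 * (their status bits feed the display summary bits in
			 * IIR, so acking them first keeps the IIR bit from
			 * immediately re-latching)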
529 			 */
530 			if (pipe_stats[pipe] & 0x8000ffff) {
531 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
532 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
533 							 pipe_name(pipe));
534 				I915_WRITE(reg, pipe_stats[pipe]);
535 			}
536 		}
537 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
538 
539 		for_each_pipe(pipe) {
540 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
541 				drm_handle_vblank(dev, pipe);
542 
543 			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
544 				intel_prepare_page_flip(dev, pipe);
545 				intel_finish_page_flip(dev, pipe);
546 			}

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;
547 		}
548 
549 		/* Consume port.  Then clear IIR or we'll miss events */
550 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
551 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
552 
553 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
554 					 hotplug_status);
555 			if (hotplug_status & dev_priv->hotplug_supported_mask)
556 				queue_work(dev_priv->wq,
557 					   &dev_priv->hotplug_work);
558 
559 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
560 			I915_READ(PORT_HOTPLUG_STAT);
561 		}
562 
566 		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
567 			gen6_queue_rps_work(dev_priv, pm_iir);
568 
569 		I915_WRITE(GTIIR, gt_iir);
570 		I915_WRITE(GEN6_PMIIR, pm_iir);
571 		I915_WRITE(VLV_IIR, iir);
572 	}
573 
574 out:
575 	return;
576 }
577 
578 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
579 {
580 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
581 	int pipe;
582 
583 	if (pch_iir & SDE_HOTPLUG_MASK)
584 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
585 
586 	if (pch_iir & SDE_AUDIO_POWER_MASK)
587 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
588 				 (pch_iir & SDE_AUDIO_POWER_MASK) >>
589 				 SDE_AUDIO_POWER_SHIFT);
590 
591 	if (pch_iir & SDE_GMBUS)
592 		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
593 
594 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
595 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
596 
597 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
598 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
599 
600 	if (pch_iir & SDE_POISON)
601 		DRM_ERROR("PCH poison interrupt\n");
602 
603 	if (pch_iir & SDE_FDI_MASK)
604 		for_each_pipe(pipe)
605 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
606 					 pipe_name(pipe),
607 					 I915_READ(FDI_RX_IIR(pipe)));
608 
609 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
610 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
611 
612 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
613 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
614 
615 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
616 		DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n");
617 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
618 		DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
619 }
620 
621 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
622 {
623 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
624 	int pipe;
625 
626 	if (pch_iir & SDE_HOTPLUG_MASK_CPT)
627 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
628 
629 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
630 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
631 				 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
632 				 SDE_AUDIO_POWER_SHIFT_CPT);
633 
634 	if (pch_iir & SDE_AUX_MASK_CPT)
635 		DRM_DEBUG_DRIVER("AUX channel interrupt\n");
636 
637 	if (pch_iir & SDE_GMBUS_CPT)
638 		DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
639 
640 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
641 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
642 
643 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
644 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
645 
646 	if (pch_iir & SDE_FDI_MASK_CPT)
647 		for_each_pipe(pipe)
648 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
649 					 pipe_name(pipe),
650 					 I915_READ(FDI_RX_IIR(pipe)));
651 }
652 
653 static irqreturn_t ivybridge_irq_handler(void *arg)
654 {
655 	struct drm_device *dev = (struct drm_device *) arg;
656 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
657 	u32 de_iir, gt_iir, de_ier, pm_iir;
658 	int i;
659 
660 	atomic_inc(&dev_priv->irq_received);
661 
662 	/* disable master interrupt before clearing iir  */
663 	de_ier = I915_READ(DEIER);
664 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
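	/*
	 * With the master bit off, no new display interrupts are latched
	 * while we read and ack IIR below; restoring DEIER at the end
	 * re-arms everything and re-raises anything still pending.
	 */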
665 
666 	gt_iir = I915_READ(GTIIR);
667 	if (gt_iir) {
668 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
669 		I915_WRITE(GTIIR, gt_iir);
670 	}
671 
672 	de_iir = I915_READ(DEIIR);
673 	if (de_iir) {
674 		if (de_iir & DE_GSE_IVB)
675 			intel_opregion_gse_intr(dev);
676 
677 		for (i = 0; i < 3; i++) {
678 			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
679 				drm_handle_vblank(dev, i);
680 			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
681 				intel_prepare_page_flip(dev, i);
682 				intel_finish_page_flip_plane(dev, i);
683 			}
684 		}
685 
686 		/* check event from PCH */
687 		if (de_iir & DE_PCH_EVENT_IVB) {
688 			u32 pch_iir = I915_READ(SDEIIR);
689 
690 			cpt_irq_handler(dev, pch_iir);
691 
692 			/* clear the PCH hotplug event before clearing the CPU irq */
693 			I915_WRITE(SDEIIR, pch_iir);
694 		}
695 
696 		I915_WRITE(DEIIR, de_iir);
697 	}
698 
699 	pm_iir = I915_READ(GEN6_PMIIR);
700 	if (pm_iir) {
701 		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
702 			gen6_queue_rps_work(dev_priv, pm_iir);
703 		I915_WRITE(GEN6_PMIIR, pm_iir);
704 	}
705 
706 	I915_WRITE(DEIER, de_ier);
707 	POSTING_READ(DEIER);
708 }
709 
710 static void ilk_gt_irq_handler(struct drm_device *dev,
711 			       struct drm_i915_private *dev_priv,
712 			       u32 gt_iir)
713 {
714 	if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
715 		notify_ring(dev, &dev_priv->ring[RCS]);
716 	if (gt_iir & GT_BSD_USER_INTERRUPT)
717 		notify_ring(dev, &dev_priv->ring[VCS]);
718 }
719 
720 static irqreturn_t ironlake_irq_handler(void *arg)
721 {
722 	struct drm_device *dev = (struct drm_device *) arg;
723 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
724 	u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
725 
726 	atomic_inc(&dev_priv->irq_received);
727 
728 	/* disable master interrupt before clearing iir  */
729 	de_ier = I915_READ(DEIER);
730 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
731 	POSTING_READ(DEIER);
732 
733 	de_iir = I915_READ(DEIIR);
734 	gt_iir = I915_READ(GTIIR);
735 	pch_iir = I915_READ(SDEIIR);
736 	pm_iir = I915_READ(GEN6_PMIIR);
737 
738 	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
739 	    (!IS_GEN6(dev) || pm_iir == 0))
740 		goto done;
741 
742 	if (IS_GEN5(dev))
743 		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
744 	else
745 		snb_gt_irq_handler(dev, dev_priv, gt_iir);
746 
747 	if (de_iir & DE_GSE)
748 		intel_opregion_gse_intr(dev);
749 
750 	if (de_iir & DE_PIPEA_VBLANK)
751 		drm_handle_vblank(dev, 0);
752 
753 	if (de_iir & DE_PIPEB_VBLANK)
754 		drm_handle_vblank(dev, 1);
755 
756 	if (de_iir & DE_PLANEA_FLIP_DONE) {
757 		intel_prepare_page_flip(dev, 0);
758 		intel_finish_page_flip_plane(dev, 0);
759 	}
760 
761 	if (de_iir & DE_PLANEB_FLIP_DONE) {
762 		intel_prepare_page_flip(dev, 1);
763 		intel_finish_page_flip_plane(dev, 1);
764 	}
765 
766 	/* check event from PCH */
767 	if (de_iir & DE_PCH_EVENT) {
768 		if (HAS_PCH_CPT(dev))
769 			cpt_irq_handler(dev, pch_iir);
770 		else
771 			ibx_irq_handler(dev, pch_iir);
772 	}
773 
774 	if (IS_GEN5(dev) && (de_iir & DE_PCU_EVENT))
775 		ironlake_handle_rps_change(dev);
776 
777 	if (IS_GEN6(dev) && (pm_iir & GEN6_PM_DEFERRED_EVENTS))
778 		gen6_queue_rps_work(dev_priv, pm_iir);
779 
780 	/* the PCH hotplug event must be cleared before the CPU irq */
781 	I915_WRITE(SDEIIR, pch_iir);
782 	I915_WRITE(GTIIR, gt_iir);
783 	I915_WRITE(DEIIR, de_iir);
784 	I915_WRITE(GEN6_PMIIR, pm_iir);
785 
786 done:
787 	I915_WRITE(DEIER, de_ier);
788 	POSTING_READ(DEIER);
789 }
790 
791 /**
792  * i915_error_work_func - do process context error handling work
793  * @work: work struct
794  *
795  * Fire an error uevent so userspace can see that a hang or error
796  * was detected.
797  */
798 static void i915_error_work_func(struct work_struct *work)
799 {
800 	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
801 						    error_work);
802 	struct drm_device *dev = dev_priv->dev;
803 
804 	/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); */
805 
806 	if (atomic_read(&dev_priv->mm.wedged)) {
807 		DRM_DEBUG_DRIVER("resetting chip\n");
808 		/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); */
809 		if (!i915_reset(dev)) {
810 			atomic_set(&dev_priv->mm.wedged, 0);
811 			/* kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); */
812 		}
813 		complete_all(&dev_priv->error_completion);
814 	}
815 }
816 
817 /* NB: please notice the memset */
818 static void i915_get_extra_instdone(struct drm_device *dev,
819 				    uint32_t *instdone)
820 {
821 	struct drm_i915_private *dev_priv = dev->dev_private;
822 	memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
823 
824 	switch(INTEL_INFO(dev)->gen) {
825 	case 2:
826 	case 3:
827 		instdone[0] = I915_READ(INSTDONE);
828 		break;
829 	case 4:
830 	case 5:
831 	case 6:
832 		instdone[0] = I915_READ(INSTDONE_I965);
833 		instdone[1] = I915_READ(INSTDONE1);
834 		break;
835 	default:
836 #if 0
837 		WARN_ONCE(1, "Unsupported platform\n");
838 #endif
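		/* FALLTHROUGH - treat unknown gens like gen 7 */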
839 	case 7:
840 		instdone[0] = I915_READ(GEN7_INSTDONE_1);
841 		instdone[1] = I915_READ(GEN7_SC_INSTDONE);
842 		instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
843 		instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
844 		break;
845 	}
846 }
847 
848 #if 0 /* CONFIG_DEBUG_FS */
849 static struct drm_i915_error_object *
850 i915_error_object_create(struct drm_i915_private *dev_priv,
851 			 struct drm_i915_gem_object *src)
852 {
853 	struct drm_i915_error_object *dst;
854 	int i, count;
855 	u32 reloc_offset;
856 
857 	if (src == NULL || src->pages == NULL)
858 		return NULL;
859 
860 	count = src->base.size / PAGE_SIZE;
861 
862 	dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC);
863 	if (dst == NULL)
864 		return NULL;
865 
866 	reloc_offset = src->gtt_offset;
867 	for (i = 0; i < count; i++) {
868 		unsigned long flags;
869 		void *d;
870 
871 		d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
872 		if (d == NULL)
873 			goto unwind;
874 
875 		local_irq_save(flags);
876 		if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
877 		    src->has_global_gtt_mapping) {
878 			void __iomem *s;
879 
880 			/* Simply ignore tiling or any overlapping fence.
881 			 * It's part of the error state, and this hopefully
882 			 * captures what the GPU read.
883 			 */
884 
885 			s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
886 						     reloc_offset);
887 			memcpy_fromio(d, s, PAGE_SIZE);
888 			io_mapping_unmap_atomic(s);
889 		} else {
890 			struct page *page;
891 			void *s;
892 
893 			page = i915_gem_object_get_page(src, i);
894 
895 			drm_clflush_pages(&page, 1);
896 
897 			s = kmap_atomic(page);
898 			memcpy(d, s, PAGE_SIZE);
899 			kunmap_atomic(s);
900 
901 			drm_clflush_pages(&page, 1);
902 		}
903 		local_irq_restore(flags);
904 
905 		dst->pages[i] = d;
906 
907 		reloc_offset += PAGE_SIZE;
908 	}
909 	dst->page_count = count;
910 	dst->gtt_offset = src->gtt_offset;
911 
912 	return dst;
913 
914 unwind:
915 	while (i--)
916 		kfree(dst->pages[i]);
917 	kfree(dst);
918 	return NULL;
919 }
920 
921 static void
922 i915_error_object_free(struct drm_i915_error_object *obj)
923 {
924 	int page;
925 
926 	if (obj == NULL)
927 		return;
928 
929 	for (page = 0; page < obj->page_count; page++)
930 		kfree(obj->pages[page]);
931 
932 	kfree(obj);
933 }
934 
935 void
936 i915_error_state_free(struct kref *error_ref)
937 {
938 	struct drm_i915_error_state *error = container_of(error_ref,
939 							  typeof(*error), ref);
941 	int i;
942 
943 	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
944 		i915_error_object_free(error->ring[i].batchbuffer);
945 		i915_error_object_free(error->ring[i].ringbuffer);
946 		kfree(error->ring[i].requests);
947 	}
948 
949 	kfree(error->active_bo);
950 	kfree(error->overlay);
951 	kfree(error);
952 }
953 static void capture_bo(struct drm_i915_error_buffer *err,
954 		       struct drm_i915_gem_object *obj)
955 {
956 	err->size = obj->base.size;
957 	err->name = obj->base.name;
958 	err->rseqno = obj->last_read_seqno;
959 	err->wseqno = obj->last_write_seqno;
960 	err->gtt_offset = obj->gtt_offset;
961 	err->read_domains = obj->base.read_domains;
962 	err->write_domain = obj->base.write_domain;
963 	err->fence_reg = obj->fence_reg;
964 	err->pinned = 0;
965 	if (obj->pin_count > 0)
966 		err->pinned = 1;
967 	if (obj->user_pin_count > 0)
968 		err->pinned = -1;
969 	err->tiling = obj->tiling_mode;
970 	err->dirty = obj->dirty;
971 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
972 	err->ring = obj->ring ? obj->ring->id : -1;
973 	err->cache_level = obj->cache_level;
974 }
975 
976 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
977 			     int count, struct list_head *head)
978 {
979 	struct drm_i915_gem_object *obj;
980 	int i = 0;
981 
982 	list_for_each_entry(obj, head, mm_list) {
983 		capture_bo(err++, obj);
984 		if (++i == count)
985 			break;
986 	}
987 
988 	return i;
989 }
990 
991 static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
992 			     int count, struct list_head *head)
993 {
994 	struct drm_i915_gem_object *obj;
995 	int i = 0;
996 
997 	list_for_each_entry(obj, head, gtt_list) {
998 		if (obj->pin_count == 0)
999 			continue;
1000 
1001 		capture_bo(err++, obj);
1002 		if (++i == count)
1003 			break;
1004 	}
1005 
1006 	return i;
1007 }
1008 
1009 static void i915_gem_record_fences(struct drm_device *dev,
1010 				   struct drm_i915_error_state *error)
1011 {
1012 	struct drm_i915_private *dev_priv = dev->dev_private;
1013 	int i;
1014 
1015 	/* Fences */
1016 	switch (INTEL_INFO(dev)->gen) {
1017 	case 7:
1018 	case 6:
1019 		for (i = 0; i < 16; i++)
1020 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1021 		break;
1022 	case 5:
1023 	case 4:
1024 		for (i = 0; i < 16; i++)
1025 			error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1026 		break;
1027 	case 3:
1028 		if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1029 			for (i = 0; i < 8; i++)
1030 				error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
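		/* FALLTHROUGH - gen3 also has the eight i830-style fences */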
1031 	case 2:
1032 		for (i = 0; i < 8; i++)
1033 			error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1034 		break;
1035 
1036 	}
1037 }
1038 
1039 static struct drm_i915_error_object *
1040 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1041 			     struct intel_ring_buffer *ring)
1042 {
1043 	struct drm_i915_gem_object *obj;
1044 	u32 seqno;
1045 
1046 	if (!ring->get_seqno)
1047 		return NULL;
1048 
1049 	if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1050 		u32 acthd = I915_READ(ACTHD);
1051 
1052 		if (WARN_ON(ring->id != RCS))
1053 			return NULL;
1054 
1055 		obj = ring->private;
1056 		if (acthd >= obj->gtt_offset &&
1057 		    acthd < obj->gtt_offset + obj->base.size)
1058 			return i915_error_object_create(dev_priv, obj);
1059 	}
1060 
1061 	seqno = ring->get_seqno(ring, false);
1062 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1063 		if (obj->ring != ring)
1064 			continue;
1065 
1066 		if (i915_seqno_passed(seqno, obj->last_read_seqno))
1067 			continue;
1068 
1069 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1070 			continue;
1071 
1072 		/* We need to copy these to an anonymous buffer as the simplest
1073 		 * method to avoid being overwritten by userspace.
1074 		 */
1075 		return i915_error_object_create(dev_priv, obj);
1076 	}
1077 
1078 	return NULL;
1079 }
1080 
1081 static void i915_record_ring_state(struct drm_device *dev,
1082 				   struct drm_i915_error_state *error,
1083 				   struct intel_ring_buffer *ring)
1084 {
1085 	struct drm_i915_private *dev_priv = dev->dev_private;
1086 
1087 	if (INTEL_INFO(dev)->gen >= 6) {
1088 		error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
1089 		error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
1090 		error->semaphore_mboxes[ring->id][0]
1091 			= I915_READ(RING_SYNC_0(ring->mmio_base));
1092 		error->semaphore_mboxes[ring->id][1]
1093 			= I915_READ(RING_SYNC_1(ring->mmio_base));
1094 		error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1095 		error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
1096 	}
1097 
1098 	if (INTEL_INFO(dev)->gen >= 4) {
1099 		error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
1100 		error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1101 		error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1102 		error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
1103 		error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
1104 		if (ring->id == RCS)
1105 			error->bbaddr = I915_READ64(BB_ADDR);
1106 	} else {
1107 		error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
1108 		error->ipeir[ring->id] = I915_READ(IPEIR);
1109 		error->ipehr[ring->id] = I915_READ(IPEHR);
1110 		error->instdone[ring->id] = I915_READ(INSTDONE);
1111 	}
1112 
1113 	error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
1114 	error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
1115 	error->seqno[ring->id] = ring->get_seqno(ring, false);
1116 	error->acthd[ring->id] = intel_ring_get_active_head(ring);
1117 	error->head[ring->id] = I915_READ_HEAD(ring);
1118 	error->tail[ring->id] = I915_READ_TAIL(ring);
1119 	error->ctl[ring->id] = I915_READ_CTL(ring);
1120 
1121 	error->cpu_ring_head[ring->id] = ring->head;
1122 	error->cpu_ring_tail[ring->id] = ring->tail;
1123 }
1124 
1125 static void i915_gem_record_rings(struct drm_device *dev,
1126 				  struct drm_i915_error_state *error)
1127 {
1128 	struct drm_i915_private *dev_priv = dev->dev_private;
1129 	struct intel_ring_buffer *ring;
1130 	struct drm_i915_gem_request *request;
1131 	int i, count;
1132 
1133 	for_each_ring(ring, dev_priv, i) {
1134 		i915_record_ring_state(dev, error, ring);
1135 
1136 		error->ring[i].batchbuffer =
1137 			i915_error_first_batchbuffer(dev_priv, ring);
1138 
1139 		error->ring[i].ringbuffer =
1140 			i915_error_object_create(dev_priv, ring->obj);
1141 
1142 		count = 0;
1143 		list_for_each_entry(request, &ring->request_list, list)
1144 			count++;
1145 
1146 		error->ring[i].num_requests = count;
1147 		error->ring[i].requests =
1148 			kmalloc(count*sizeof(struct drm_i915_error_request),
1149 				GFP_ATOMIC);
1150 		if (error->ring[i].requests == NULL) {
1151 			error->ring[i].num_requests = 0;
1152 			continue;
1153 		}
1154 
1155 		count = 0;
1156 		list_for_each_entry(request, &ring->request_list, list) {
1157 			struct drm_i915_error_request *erq;
1158 
1159 			erq = &error->ring[i].requests[count++];
1160 			erq->seqno = request->seqno;
1161 			erq->jiffies = request->emitted_jiffies;
1162 			erq->tail = request->tail;
1163 		}
1164 	}
1165 }
1166 
1167 /**
1168  * i915_capture_error_state - capture an error record for later analysis
1169  * @dev: drm device
1170  *
1171  * Should be called when an error is detected (either a hang or an error
1172  * interrupt) to capture error state from the time of the error.  Fills
1173  * out a structure which becomes available in debugfs for user level tools
1174  * to pick up.
1175  */
1176 static void i915_capture_error_state(struct drm_device *dev)
1177 {
1178 	struct drm_i915_private *dev_priv = dev->dev_private;
1179 	struct drm_i915_gem_object *obj;
1180 	struct drm_i915_error_state *error;
1181 	unsigned long flags;
1182 	int i, pipe;
1183 
1184 	spin_lock_irqsave(&dev_priv->error_lock, flags);
1185 	error = dev_priv->first_error;
1186 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1187 	if (error)
1188 		return;
1189 
1190 	/* Account for pipe specific data like PIPE*STAT */
1191 	error = kmalloc(sizeof(*error), M_DRM, M_WAITOK | M_NULLOK | M_ZERO);
1192 	if (!error) {
1193 		DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1194 		return;
1195 	}
1196 
1197 	DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
1198 		 dev->primary->index);
1199 
1200 	kref_init(&error->ref);
1201 	error->eir = I915_READ(EIR);
1202 	error->pgtbl_er = I915_READ(PGTBL_ER);
1203 	error->ccid = I915_READ(CCID);
1204 
1205 	if (HAS_PCH_SPLIT(dev))
1206 		error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1207 	else if (IS_VALLEYVIEW(dev))
1208 		error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1209 	else if (IS_GEN2(dev))
1210 		error->ier = I915_READ16(IER);
1211 	else
1212 		error->ier = I915_READ(IER);
1213 
1214 	if (INTEL_INFO(dev)->gen >= 6)
1215 		error->derrmr = I915_READ(DERRMR);
1216 
1217 	if (IS_VALLEYVIEW(dev))
1218 		error->forcewake = I915_READ(FORCEWAKE_VLV);
1219 	else if (INTEL_INFO(dev)->gen >= 7)
1220 		error->forcewake = I915_READ(FORCEWAKE_MT);
1221 	else if (INTEL_INFO(dev)->gen == 6)
1222 		error->forcewake = I915_READ(FORCEWAKE);
1223 
1224 	for_each_pipe(pipe)
1225 		error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
1226 
1227 	if (INTEL_INFO(dev)->gen >= 6) {
1228 		error->error = I915_READ(ERROR_GEN6);
1229 		error->done_reg = I915_READ(DONE_REG);
1230 	}
1231 
1232 	if (INTEL_INFO(dev)->gen == 7)
1233 		error->err_int = I915_READ(GEN7_ERR_INT);
1234 
1235 	i915_get_extra_instdone(dev, error->extra_instdone);
1236 
1237 	i915_gem_record_fences(dev, error);
1238 	i915_gem_record_rings(dev, error);
1239 
1240 	/* Record buffers on the active and pinned lists. */
1241 	error->active_bo = NULL;
1242 	error->pinned_bo = NULL;
1243 
1244 	i = 0;
1245 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1246 		i++;
1247 	error->active_bo_count = i;
1248 	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
1249 		if (obj->pin_count)
1250 			i++;
1251 	error->pinned_bo_count = i - error->active_bo_count;
1252 
1255 	if (i) {
1256 		error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
1257 					   GFP_ATOMIC);
1258 		if (error->active_bo)
1259 			error->pinned_bo =
1260 				error->active_bo + error->active_bo_count;
1261 	}
1262 
1263 	if (error->active_bo)
1264 		error->active_bo_count =
1265 			capture_active_bo(error->active_bo,
1266 					  error->active_bo_count,
1267 					  &dev_priv->mm.active_list);
1268 
1269 	if (error->pinned_bo)
1270 		error->pinned_bo_count =
1271 			capture_pinned_bo(error->pinned_bo,
1272 					  error->pinned_bo_count,
1273 					  &dev_priv->mm.bound_list);
1274 
1275 	do_gettimeofday(&error->time);
1276 
1277 	error->overlay = intel_overlay_capture_error_state(dev);
1278 	error->display = intel_display_capture_error_state(dev);
1279 
1280 	spin_lock_irqsave(&dev_priv->error_lock, flags);
1281 	if (dev_priv->first_error == NULL) {
1282 		dev_priv->first_error = error;
1283 		error = NULL;
1284 	}
1285 	spin_unlock_irqrestore(&dev_priv->error_lock, flags);
1286 
1287 	if (error)
1288 		i915_error_state_free(&error->ref);
1289 }
1290 
1291 void i915_destroy_error_state(struct drm_device *dev)
1292 {
1293 	struct drm_i915_private *dev_priv = dev->dev_private;
1294 	struct drm_i915_error_state *error;
1295 
1296 	lockmgr(&dev_priv->error_lock, LK_EXCLUSIVE);
1297 	error = dev_priv->first_error;
1298 	dev_priv->first_error = NULL;
1299 	lockmgr(&dev_priv->error_lock, LK_RELEASE);
1300 
1301 	if (error)
1302 		kref_put(&error->ref, i915_error_state_free);
1303 }
1304 #else
1305 #define i915_capture_error_state(x)
1306 #endif
1307 
1308 static void i915_report_and_clear_eir(struct drm_device *dev)
1309 {
1310 	struct drm_i915_private *dev_priv = dev->dev_private;
1311 	uint32_t instdone[I915_NUM_INSTDONE_REG];
1312 	u32 eir = I915_READ(EIR);
1313 	int pipe, i;
1314 
1315 	if (!eir)
1316 		return;
1317 
1318 	pr_err("render error detected, EIR: 0x%08x\n", eir);
1319 
1320 	i915_get_extra_instdone(dev, instdone);
1321 
1322 	if (IS_G4X(dev)) {
1323 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
1324 			u32 ipeir = I915_READ(IPEIR_I965);
1325 
1326 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1327 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1328 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
1329 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1330 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1331 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1332 			I915_WRITE(IPEIR_I965, ipeir);
1333 			POSTING_READ(IPEIR_I965);
1334 		}
1335 		if (eir & GM45_ERROR_PAGE_TABLE) {
1336 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1337 			pr_err("page table error\n");
1338 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1339 			I915_WRITE(PGTBL_ER, pgtbl_err);
1340 			POSTING_READ(PGTBL_ER);
1341 		}
1342 	}
1343 
1344 	if (!IS_GEN2(dev)) {
1345 		if (eir & I915_ERROR_PAGE_TABLE) {
1346 			u32 pgtbl_err = I915_READ(PGTBL_ER);
1347 			pr_err("page table error\n");
1348 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
1349 			I915_WRITE(PGTBL_ER, pgtbl_err);
1350 			POSTING_READ(PGTBL_ER);
1351 		}
1352 	}
1353 
1354 	if (eir & I915_ERROR_MEMORY_REFRESH) {
1355 		pr_err("memory refresh error:\n");
1356 		for_each_pipe(pipe)
1357 			pr_err("pipe %c stat: 0x%08x\n",
1358 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
1359 		/* pipestat has already been acked */
1360 	}
1361 	if (eir & I915_ERROR_INSTRUCTION) {
1362 		pr_err("instruction error\n");
1363 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
1364 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
1365 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
1366 		if (INTEL_INFO(dev)->gen < 4) {
1367 			u32 ipeir = I915_READ(IPEIR);
1368 
1369 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
1370 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
1371 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
1372 			I915_WRITE(IPEIR, ipeir);
1373 			POSTING_READ(IPEIR);
1374 		} else {
1375 			u32 ipeir = I915_READ(IPEIR_I965);
1376 
1377 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
1378 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
1379 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
1380 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
1381 			I915_WRITE(IPEIR_I965, ipeir);
1382 			POSTING_READ(IPEIR_I965);
1383 		}
1384 	}
1385 
1386 	I915_WRITE(EIR, eir);
1387 	POSTING_READ(EIR);
1388 	eir = I915_READ(EIR);
1389 	if (eir) {
1390 		/*
1391 		 * some errors might have become stuck,
1392 		 * mask them.
1393 		 */
1394 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
1395 		I915_WRITE(EMR, I915_READ(EMR) | eir);
1396 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
1397 	}
1398 }
1399 
1400 /**
1401  * i915_handle_error - handle an error interrupt
1402  * @dev: drm device
1403  *
1404  * Do some basic checking of register state at error interrupt time and
1405  * dump it to the syslog.  Also call i915_capture_error_state() to make
1406  * sure we get a record and make it available in debugfs.  Fire a uevent
1407  * so userspace knows something bad happened (should trigger collection
1408  * of a ring dump etc.).
1409  */
1410 void i915_handle_error(struct drm_device *dev, bool wedged)
1411 {
1412 	struct drm_i915_private *dev_priv = dev->dev_private;
1413 	struct intel_ring_buffer *ring;
1414 	int i;
1415 
1416 	i915_capture_error_state(dev);
1417 	i915_report_and_clear_eir(dev);
1418 
1419 	if (wedged) {
1420 		INIT_COMPLETION(dev_priv->error_completion);
1421 		atomic_set(&dev_priv->mm.wedged, 1);
1422 
1423 		/*
1424 		 * Wakeup waiting processes so they don't hang
1425 		 */
1426 		for_each_ring(ring, dev_priv, i)
1427 			wake_up_all(&ring->irq_queue);
1428 	}
1429 
1430 	queue_work(dev_priv->wq, &dev_priv->error_work);
1431 }
1432 
1433 static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
1434 {
1435 	drm_i915_private_t *dev_priv = dev->dev_private;
1436 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1437 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1438 	struct drm_i915_gem_object *obj;
1439 	struct intel_unpin_work *work;
1440 	bool stall_detected;
1441 
1442 	/* Ignore early vblank irqs */
1443 	if (intel_crtc == NULL)
1444 		return;
1445 
1446 	lockmgr(&dev->event_lock, LK_EXCLUSIVE);
1447 	work = intel_crtc->unpin_work;
1448 
1449 	if (work == NULL ||
1450 	    atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
1451 	    !work->enable_stall_check) {
1452 		/* Either the pending flip IRQ arrived, or we're too early. Don't check */
1453 		lockmgr(&dev->event_lock, LK_RELEASE);
1454 		return;
1455 	}
1456 
1457 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
1458 	obj = work->pending_flip_obj;
1459 	if (INTEL_INFO(dev)->gen >= 4) {
1460 		int dspsurf = DSPSURF(intel_crtc->plane);
1461 		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
1462 					obj->gtt_offset;
1463 	} else {
1464 		int dspaddr = DSPADDR(intel_crtc->plane);
1465 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
1466 							crtc->y * crtc->fb->pitches[0] +
1467 							crtc->x * crtc->fb->bits_per_pixel/8);
1468 	}
1469 
1470 	lockmgr(&dev->event_lock, LK_RELEASE);
1471 
1472 	if (stall_detected) {
1473 		DRM_DEBUG_DRIVER("Pageflip stall detected\n");
1474 		intel_prepare_page_flip(dev, intel_crtc->plane);
1475 	}
1476 }
1477 
1478 /* Called from drm generic code, passed 'crtc' which
1479  * we use as a pipe index
1480  */
1481 static int i915_enable_vblank(struct drm_device *dev, int pipe)
1482 {
1483 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1484 
1485 	if (!i915_pipe_enabled(dev, pipe))
1486 		return -EINVAL;
1487 
1488 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1489 	if (INTEL_INFO(dev)->gen >= 4)
1490 		i915_enable_pipestat(dev_priv, pipe,
1491 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1492 	else
1493 		i915_enable_pipestat(dev_priv, pipe,
1494 				     PIPE_VBLANK_INTERRUPT_ENABLE);
1495 
1496 	/* maintain vblank delivery even in deep C-states */
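	/*
	 * (INSTPM_AGPBUSY_DIS is itself a disable bit: clearing it keeps the
	 * chip signalling busy, and thus delivering vblanks, in deep
	 * C-states; i915_disable_vblank() sets it again.)
	 */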
1497 	if (dev_priv->info->gen == 3)
1498 		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1499 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1500 
1501 	return 0;
1502 }
1503 
1504 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
1505 {
1506 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1507 
1508 	if (!i915_pipe_enabled(dev, pipe))
1509 		return -EINVAL;
1510 
1511 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1512 	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1513 				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1514 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1515 
1516 	return 0;
1517 }
1518 
1519 static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
1520 {
1521 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1522 
1523 	if (!i915_pipe_enabled(dev, pipe))
1524 		return -EINVAL;
1525 
1526 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1527 	ironlake_enable_display_irq(dev_priv,
1528 				    DE_PIPEA_VBLANK_IVB << (5 * pipe));
1529 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1530 
1531 	return 0;
1532 }
1533 
1534 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
1535 {
1536 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1537 	u32 imr;
1538 
1539 	if (!i915_pipe_enabled(dev, pipe))
1540 		return -EINVAL;
1541 
1542 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1543 	imr = I915_READ(VLV_IMR);
1544 	if (pipe == 0)
1545 		imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1546 	else
1547 		imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1548 	I915_WRITE(VLV_IMR, imr);
1549 	i915_enable_pipestat(dev_priv, pipe,
1550 			     PIPE_START_VBLANK_INTERRUPT_ENABLE);
1551 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1552 
1553 	return 0;
1554 }
1555 
1556 /* Called from drm generic code, passed 'crtc' which
1557  * we use as a pipe index
1558  */
1559 static void i915_disable_vblank(struct drm_device *dev, int pipe)
1560 {
1561 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1562 
1563 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1564 	if (dev_priv->info->gen == 3)
1565 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
1566 
1567 	i915_disable_pipestat(dev_priv, pipe,
1568 			      PIPE_VBLANK_INTERRUPT_ENABLE |
1569 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1570 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1571 }
1572 
1573 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
1574 {
1575 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1576 
1577 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1578 	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1579 				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1580 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1581 }
1582 
1583 static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
1584 {
1585 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1586 
1587 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1588 	ironlake_disable_display_irq(dev_priv,
1589 				     DE_PIPEA_VBLANK_IVB << (pipe * 5));
1590 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1591 }
1592 
1593 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
1594 {
1595 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1596 	u32 imr;
1597 
1598 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1599 	i915_disable_pipestat(dev_priv, pipe,
1600 			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
1601 	imr = I915_READ(VLV_IMR);
1602 	if (pipe == 0)
1603 		imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
1604 	else
1605 		imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1606 	I915_WRITE(VLV_IMR, imr);
1607 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1608 }
1609 
1610 static u32
1611 ring_last_seqno(struct intel_ring_buffer *ring)
1612 {
1613 	return list_entry(ring->request_list.prev,
1614 			  struct drm_i915_gem_request, list)->seqno;
1615 }
1616 
1617 static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1618 {
1619 	if (list_empty(&ring->request_list) ||
1620 	    i915_seqno_passed(ring->get_seqno(ring, false),
1621 			      ring_last_seqno(ring))) {
1622 		/* Issue a wake-up to catch stuck h/w. */
1623 #if 0 /* XXX From OpenBSD */
1624 		if (waitqueue_active(&ring->irq_queue)) {
1625 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
1626 				  ring->name);
1627 			wake_up_all(&ring->irq_queue);
1628 			*err = true;
1629 		}
1630 #else
1631 		wake_up_all(&ring->irq_queue);
1632 #endif
1633 		return true;
1634 	}
1635 	return false;
1636 }
1637 
1638 static bool kick_ring(struct intel_ring_buffer *ring)
1639 {
1640 	struct drm_device *dev = ring->dev;
1641 	struct drm_i915_private *dev_priv = dev->dev_private;
1642 	u32 tmp = I915_READ_CTL(ring);
1643 	if (tmp & RING_WAIT) {
1644 		DRM_ERROR("Kicking stuck wait on %s\n",
1645 			  ring->name);
1646 		I915_WRITE_CTL(ring, tmp);
1647 		return true;
1648 	}
1649 	return false;
1650 }
1651 
1652 static bool i915_hangcheck_hung(struct drm_device *dev)
1653 {
1654 	drm_i915_private_t *dev_priv = dev->dev_private;
1655 
1656 	if (dev_priv->hangcheck_count++ > 1) {
1657 		bool hung = true;
1658 
1659 		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
1660 		i915_handle_error(dev, true);
1661 
1662 		if (!IS_GEN2(dev)) {
1663 			struct intel_ring_buffer *ring;
1664 			int i;
1665 
1666 			/* Is the chip hanging on a WAIT_FOR_EVENT?
1667 			 * If so we can simply poke the RB_WAIT bit
1668 			 * and break the hang. This should work on
1669 			 * all but the second generation chipsets.
1670 			 */
1671 			for_each_ring(ring, dev_priv, i)
1672 				hung &= !kick_ring(ring);
1673 		}
1674 
1675 		return hung;
1676 	}
1677 
1678 	return false;
1679 }
1680 
1681 /**
1682  * This is called when the chip hasn't reported back with completed
1683  * batchbuffers in a long time. The first time this is called we simply record
1684  * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses
1685  * again, we assume the chip is wedged and try to fix it.
1686  */
1687 void i915_hangcheck_elapsed(unsigned long data)
1688 {
1689 	struct drm_device *dev = (struct drm_device *)data;
1690 	drm_i915_private_t *dev_priv = dev->dev_private;
1691 	uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG];
1692 	struct intel_ring_buffer *ring;
1693 	bool err = false, idle;
1694 	int i;
1695 
1696 	if (!i915_enable_hangcheck)
1697 		return;
1698 
1699 	memset(acthd, 0, sizeof(acthd));
1700 	idle = true;
1701 	for_each_ring(ring, dev_priv, i) {
1702 		idle &= i915_hangcheck_ring_idle(ring, &err);
1703 		acthd[i] = intel_ring_get_active_head(ring);
1704 	}
1705 
1706 	/* If all work is done then ACTHD clearly hasn't advanced. */
1707 	if (idle) {
1708 		if (err) {
1709 			if (i915_hangcheck_hung(dev))
1710 				return;
1711 
1712 			goto repeat;
1713 		}
1714 
1715 		dev_priv->hangcheck_count = 0;
1716 		return;
1717 	}
1718 
1719 	i915_get_extra_instdone(dev, instdone);
1720 	if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
1721 	    memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
1722 		if (i915_hangcheck_hung(dev))
1723 			return;
1724 	} else {
1725 		dev_priv->hangcheck_count = 0;
1726 
1727 		memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
1728 		memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
1729 	}
1730 
1731 repeat:
1732 	/* Reset the timer in case the chip hangs without another request being added */
1733 	mod_timer(&dev_priv->hangcheck_timer,
1734 		  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
1735 }
1736 
1737 /* drm_dma.h hooks */
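/*
 * A note on the scheme used below: each interrupt domain (display engine,
 * GT, south/PCH, PM) has an ISR (live status), IMR (bits masked from
 * reaching IIR), IIR (latched status, write-1-to-clear) and IER (enable)
 * register. The preinstall hooks mask and disable everything; postinstall
 * acks stale IIR bits, programs the mask, then enables the sources we want.
 */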
1739 static void ironlake_irq_preinstall(struct drm_device *dev)
1740 {
1741 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1742 
1743 	atomic_set(&dev_priv->irq_received, 0);
1744 
1745 	I915_WRITE(HWSTAM, 0xeffe);
1746 
1747 	/* XXX hotplug from PCH */
1748 
1749 	I915_WRITE(DEIMR, 0xffffffff);
1750 	I915_WRITE(DEIER, 0x0);
1751 	POSTING_READ(DEIER);
1752 
1753 	/* and GT */
1754 	I915_WRITE(GTIMR, 0xffffffff);
1755 	I915_WRITE(GTIER, 0x0);
1756 	POSTING_READ(GTIER);
1757 
1758 	/* south display irq */
1759 	I915_WRITE(SDEIMR, 0xffffffff);
1760 	I915_WRITE(SDEIER, 0x0);
1761 	POSTING_READ(SDEIER);
1762 }
1763 
1764 static void valleyview_irq_preinstall(struct drm_device *dev)
1765 {
1766 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1767 	int pipe;
1768 
1769 	atomic_set(&dev_priv->irq_received, 0);
1770 
1771 	/* VLV magic */
1772 	I915_WRITE(VLV_IMR, 0);
1773 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
1774 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
1775 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
1776 
1777 	/* and GT */
1778 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1779 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1780 	I915_WRITE(GTIMR, 0xffffffff);
1781 	I915_WRITE(GTIER, 0x0);
1782 	POSTING_READ(GTIER);
1783 
1784 	I915_WRITE(DPINVGTT, 0xff);
1785 
1786 	I915_WRITE(PORT_HOTPLUG_EN, 0);
1787 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
1788 	for_each_pipe(pipe)
1789 		I915_WRITE(PIPESTAT(pipe), 0xffff);
1790 	I915_WRITE(VLV_IIR, 0xffffffff);
1791 	I915_WRITE(VLV_IMR, 0xffffffff);
1792 	I915_WRITE(VLV_IER, 0x0);
1793 	POSTING_READ(VLV_IER);
1794 }
1795 
1796 /*
1797  * Enable digital hotplug on the PCH, and configure the DP short pulse
1798  * duration to 2ms (which is the minimum in the Display Port spec)
1799  *
1800  * This register is the same on all known PCH chips.
1801  */
1802 
1803 static void ironlake_enable_pch_hotplug(struct drm_device *dev)
1804 {
1805 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1806 	u32	hotplug;
1807 
1808 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
1809 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
1810 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
1811 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
1812 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
1813 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
1814 }
1815 
1816 static int ironlake_irq_postinstall(struct drm_device *dev)
1817 {
1818 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* Interrupts that we always want enabled */
1820 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1821 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1822 	u32 render_irqs;
1823 	u32 hotplug_mask;
1824 
1825 	dev_priv->irq_mask = ~display_mask;
1826 
	/* These should always be able to generate an irq */
1828 	I915_WRITE(DEIIR, I915_READ(DEIIR));
1829 	I915_WRITE(DEIMR, dev_priv->irq_mask);
1830 	I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1831 	POSTING_READ(DEIER);
1832 
1833 	dev_priv->gt_irq_mask = ~0;
1834 
1835 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1836 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1837 
1838 	if (IS_GEN6(dev))
1839 		render_irqs =
1840 			GT_USER_INTERRUPT |
1841 			GEN6_BSD_USER_INTERRUPT |
1842 			GEN6_BLITTER_USER_INTERRUPT;
1843 	else
1844 		render_irqs =
1845 			GT_USER_INTERRUPT |
1846 			GT_PIPE_NOTIFY |
1847 			GT_BSD_USER_INTERRUPT;
1848 	I915_WRITE(GTIER, render_irqs);
1849 	POSTING_READ(GTIER);
1850 
1851 	if (HAS_PCH_CPT(dev)) {
1852 		hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1853 				SDE_PORTB_HOTPLUG_CPT |
1854 				SDE_PORTC_HOTPLUG_CPT |
1855 				SDE_PORTD_HOTPLUG_CPT);
1856 	} else {
1857 		hotplug_mask = (SDE_CRT_HOTPLUG |
1858 				SDE_PORTB_HOTPLUG |
1859 				SDE_PORTC_HOTPLUG |
1860 				SDE_PORTD_HOTPLUG |
1861 				SDE_AUX_MASK);
1862 	}
1863 
1864 	dev_priv->pch_irq_mask = ~hotplug_mask;
1865 
1866 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1867 	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1868 	I915_WRITE(SDEIER, hotplug_mask);
1869 	POSTING_READ(SDEIER);
1870 
1871 	ironlake_enable_pch_hotplug(dev);
1872 
1873 	if (IS_IRONLAKE_M(dev)) {
1874 		/* Clear & enable PCU event interrupts */
1875 		I915_WRITE(DEIIR, DE_PCU_EVENT);
1876 		I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
1877 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static int ivybridge_irq_postinstall(struct drm_device *dev)
1884 {
1885 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	/* Interrupts that we always want enabled */
1887 	u32 display_mask =
1888 		DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
1889 		DE_PLANEC_FLIP_DONE_IVB |
1890 		DE_PLANEB_FLIP_DONE_IVB |
1891 		DE_PLANEA_FLIP_DONE_IVB;
1892 	u32 render_irqs;
1893 	u32 hotplug_mask;
1894 
1895 	dev_priv->irq_mask = ~display_mask;
1896 
	/* These should always be able to generate an irq */
1898 	I915_WRITE(DEIIR, I915_READ(DEIIR));
1899 	I915_WRITE(DEIMR, dev_priv->irq_mask);
1900 	I915_WRITE(DEIER,
1901 		   display_mask |
1902 		   DE_PIPEC_VBLANK_IVB |
1903 		   DE_PIPEB_VBLANK_IVB |
1904 		   DE_PIPEA_VBLANK_IVB);
1905 	POSTING_READ(DEIER);
1906 
1907 	dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1908 
1909 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1910 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1911 
1912 	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1913 		GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT;
1914 	I915_WRITE(GTIER, render_irqs);
1915 	POSTING_READ(GTIER);
1916 
1917 	hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
1918 			SDE_PORTB_HOTPLUG_CPT |
1919 			SDE_PORTC_HOTPLUG_CPT |
1920 			SDE_PORTD_HOTPLUG_CPT);
1921 	dev_priv->pch_irq_mask = ~hotplug_mask;
1922 
1923 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1924 	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1925 	I915_WRITE(SDEIER, hotplug_mask);
1926 	POSTING_READ(SDEIER);
1927 
1928 	ironlake_enable_pch_hotplug(dev);
1929 
1930 	return 0;
1931 }
1932 
1933 static int valleyview_irq_postinstall(struct drm_device *dev)
1934 {
1935 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1936 	u32 enable_mask;
1937 	u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
1938 	u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
1939 	u32 render_irqs;
1940 	u16 msid;
1941 
1942 	enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1943 	enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
1944 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1945 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
1946 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1947 
	/*
	 * Leave vblank interrupts masked initially; the enable/disable
	 * vblank hooks will toggle them based on usage.
	 */
1952 	dev_priv->irq_mask = (~enable_mask) |
1953 		I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
1954 		I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
1955 
1956 	dev_priv->pipestat[0] = 0;
1957 	dev_priv->pipestat[1] = 0;
1958 
1959 	/* Hack for broken MSIs on VLV */
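	/*
	 * (Assumed interpretation, inferred from the values: 0x94/0x98 are
	 * the MSI address/data pair in config space, and 0xfee00000 is the
	 * architectural x86 MSI address window.  msid is 16 bits, so the
	 * data register is read and written back 2 bytes wide.)
	 */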
	pci_write_config(dev->dev, 0x94, 0xfee00000, 4);
	msid = pci_read_config(dev->dev, 0x98, 2);
	msid &= 0xff; /* mask out delivery bits */
	msid |= (1<<14);
	pci_write_config(dev->dev, 0x98, msid, 2);
1965 
1966 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
1967 	I915_WRITE(VLV_IER, enable_mask);
1968 	I915_WRITE(VLV_IIR, 0xffffffff);
1969 	I915_WRITE(PIPESTAT(0), 0xffff);
1970 	I915_WRITE(PIPESTAT(1), 0xffff);
1971 	POSTING_READ(VLV_IER);
1972 
1973 	i915_enable_pipestat(dev_priv, 0, pipestat_enable);
1974 	i915_enable_pipestat(dev_priv, 1, pipestat_enable);
1975 
1976 	I915_WRITE(VLV_IIR, 0xffffffff);
1977 	I915_WRITE(VLV_IIR, 0xffffffff);
1978 
1979 	I915_WRITE(GTIIR, I915_READ(GTIIR));
1980 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1981 
1982 	render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
1983 		GEN6_BLITTER_USER_INTERRUPT;
1984 	I915_WRITE(GTIER, render_irqs);
1985 	POSTING_READ(GTIER);
1986 
1987 	/* ack & enable invalid PTE error interrupts */
1988 #if 0 /* FIXME: add support to irq handler for checking these bits */
1989 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
1990 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
1991 #endif
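
#if 0	/* likewise not built: a guess at the handler-side check the FIXME
	 * above asks for; it would presumably live in valleyview_irq_handler.
	 */
	{
		u32 dpinvgtt = I915_READ(DPINVGTT);

		if (dpinvgtt & DPINVGTT_STATUS_MASK) {
			DRM_ERROR("invalid PTE access, DPINVGTT 0x%08x\n",
				  dpinvgtt);
			/* status bits look write-1-to-clear, per the ack above */
			I915_WRITE(DPINVGTT, dpinvgtt & DPINVGTT_STATUS_MASK);
		}
	}
#endif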
1992 
1993 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1994 	/* Note HDMI and DP share bits */
1995 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
1996 		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
1997 	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
1998 		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
1999 	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2000 		hotplug_en |= HDMID_HOTPLUG_INT_EN;
2001 	if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2002 		hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2003 	if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2004 		hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2005 	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2006 		hotplug_en |= CRT_HOTPLUG_INT_EN;
2007 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2008 	}
2009 
2010 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2011 
2012 	return 0;
2013 }
2014 
2015 static void valleyview_irq_uninstall(struct drm_device *dev)
2016 {
2017 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2018 	int pipe;
2019 
2020 	if (!dev_priv)
2021 		return;
2022 
2023 	for_each_pipe(pipe)
2024 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2025 
2026 	I915_WRITE(HWSTAM, 0xffffffff);
2027 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2028 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2029 	for_each_pipe(pipe)
2030 		I915_WRITE(PIPESTAT(pipe), 0xffff);
2031 	I915_WRITE(VLV_IIR, 0xffffffff);
2032 	I915_WRITE(VLV_IMR, 0xffffffff);
2033 	I915_WRITE(VLV_IER, 0x0);
2034 	POSTING_READ(VLV_IER);
2035 }
2036 
2037 static void ironlake_irq_uninstall(struct drm_device *dev)
2038 {
2039 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2040 
2041 	if (!dev_priv)
2042 		return;
2043 
2044 	I915_WRITE(HWSTAM, 0xffffffff);
2045 
2046 	I915_WRITE(DEIMR, 0xffffffff);
2047 	I915_WRITE(DEIER, 0x0);
2048 	I915_WRITE(DEIIR, I915_READ(DEIIR));
2049 
2050 	I915_WRITE(GTIMR, 0xffffffff);
2051 	I915_WRITE(GTIER, 0x0);
2052 	I915_WRITE(GTIIR, I915_READ(GTIIR));
2053 
2054 	I915_WRITE(SDEIMR, 0xffffffff);
2055 	I915_WRITE(SDEIER, 0x0);
2056 	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2057 }
2058 
static void i8xx_irq_preinstall(struct drm_device *dev)
2060 {
2061 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2062 	int pipe;
2063 
2064 	atomic_set(&dev_priv->irq_received, 0);
2065 
2066 	for_each_pipe(pipe)
2067 		I915_WRITE(PIPESTAT(pipe), 0);
2068 	I915_WRITE16(IMR, 0xffff);
2069 	I915_WRITE16(IER, 0x0);
2070 	POSTING_READ16(IER);
2071 }
2072 
2073 static int i8xx_irq_postinstall(struct drm_device *dev)
2074 {
2075 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2076 
2077 	dev_priv->pipestat[0] = 0;
2078 	dev_priv->pipestat[1] = 0;
2079 
2080 	I915_WRITE16(EMR,
2081 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2082 
2083 	/* Unmask the interrupts that we always want on. */
2084 	dev_priv->irq_mask =
2085 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2086 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2087 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2088 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2089 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2090 	I915_WRITE16(IMR, dev_priv->irq_mask);
2091 
2092 	I915_WRITE16(IER,
2093 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2094 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2095 		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2096 		     I915_USER_INTERRUPT);
2097 	POSTING_READ16(IER);
2098 
2099 	return 0;
2100 }
2101 
2102 static irqreturn_t i8xx_irq_handler(void *arg)
2103 {
2104 	struct drm_device *dev = (struct drm_device *) arg;
2105 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2106 	u16 iir, new_iir;
2107 	u32 pipe_stats[2];
2108 	int irq_received;
2109 	int pipe;
2110 	u16 flip_mask =
2111 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2112 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2113 
2114 	atomic_inc(&dev_priv->irq_received);
2115 
2116 	iir = I915_READ16(IIR);
2117 	if (iir == 0)
2118 		return;
2119 
2120 	while (iir & ~flip_mask) {
2121 		/* Can't rely on pipestat interrupt bit in iir as it might
2122 		 * have been cleared after the pipestat interrupt was received.
2123 		 * It doesn't set the bit in iir again, but it still produces
2124 		 * interrupts (for non-MSI).
2125 		 */
2126 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2127 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2128 			i915_handle_error(dev, false);
2129 
2130 		for_each_pipe(pipe) {
2131 			int reg = PIPESTAT(pipe);
2132 			pipe_stats[pipe] = I915_READ(reg);
2133 
2134 			/*
2135 			 * Clear the PIPE*STAT regs before the IIR
2136 			 */
2137 			if (pipe_stats[pipe] & 0x8000ffff) {
2138 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2139 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2140 							 pipe_name(pipe));
2141 				I915_WRITE(reg, pipe_stats[pipe]);
2142 				irq_received = 1;
2143 			}
2144 		}
2145 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2146 
2147 		I915_WRITE16(IIR, iir & ~flip_mask);
2148 		new_iir = I915_READ16(IIR); /* Flush posted writes */
2149 
2150 		i915_update_dri1_breadcrumb(dev);
2151 
2152 		if (iir & I915_USER_INTERRUPT)
2153 			notify_ring(dev, &dev_priv->ring[RCS]);
2154 
2155 		if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
2156 		    drm_handle_vblank(dev, 0)) {
2157 			if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
2158 				intel_prepare_page_flip(dev, 0);
2159 				intel_finish_page_flip(dev, 0);
2160 				flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT;
2161 			}
2162 		}
2163 
2164 		if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
2165 		    drm_handle_vblank(dev, 1)) {
2166 			if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) {
2167 				intel_prepare_page_flip(dev, 1);
2168 				intel_finish_page_flip(dev, 1);
2169 				flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2170 			}
2171 		}
2172 
2173 		iir = new_iir;
2174 	}
2175 
2176 	return;
2177 }
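
/*
 * Note on the flip_mask dance above: the plane-flip "pending" bits are
 * kept out of both the loop condition and the IIR clear, so an
 * outstanding flip neither keeps the handler spinning nor loses its
 * pending status; once the vblank path completes the flip, the bit is
 * dropped from flip_mask and gets cleared from IIR on the next pass.
 */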
2178 
static void i8xx_irq_uninstall(struct drm_device *dev)
2180 {
2181 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2182 	int pipe;
2183 
2184 	for_each_pipe(pipe) {
2185 		/* Clear enable bits; then clear status bits */
2186 		I915_WRITE(PIPESTAT(pipe), 0);
2187 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2188 	}
2189 	I915_WRITE16(IMR, 0xffff);
2190 	I915_WRITE16(IER, 0x0);
2191 	I915_WRITE16(IIR, I915_READ16(IIR));
2192 }
2193 
static void i915_irq_preinstall(struct drm_device *dev)
2195 {
2196 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2197 	int pipe;
2198 
2199 	atomic_set(&dev_priv->irq_received, 0);
2200 
2201 	if (I915_HAS_HOTPLUG(dev)) {
2202 		I915_WRITE(PORT_HOTPLUG_EN, 0);
2203 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2204 	}
2205 
2206 	I915_WRITE16(HWSTAM, 0xeffe);
2207 	for_each_pipe(pipe)
2208 		I915_WRITE(PIPESTAT(pipe), 0);
2209 	I915_WRITE(IMR, 0xffffffff);
2210 	I915_WRITE(IER, 0x0);
2211 	POSTING_READ(IER);
2212 }
2213 
2214 static int i915_irq_postinstall(struct drm_device *dev)
2215 {
2216 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2217 	u32 enable_mask;
2218 
2219 	dev_priv->pipestat[0] = 0;
2220 	dev_priv->pipestat[1] = 0;
2221 
2222 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2223 
2224 	/* Unmask the interrupts that we always want on. */
2225 	dev_priv->irq_mask =
2226 		~(I915_ASLE_INTERRUPT |
2227 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2228 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2229 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2230 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2231 		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2232 
2233 	enable_mask =
2234 		I915_ASLE_INTERRUPT |
2235 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2236 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2237 		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2238 		I915_USER_INTERRUPT;
2239 
2240 	if (I915_HAS_HOTPLUG(dev)) {
2241 		/* Enable in IER... */
2242 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
2243 		/* and unmask in IMR */
2244 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
2245 	}
2246 
2247 	I915_WRITE(IMR, dev_priv->irq_mask);
2248 	I915_WRITE(IER, enable_mask);
2249 	POSTING_READ(IER);
2250 
2251 	if (I915_HAS_HOTPLUG(dev)) {
2252 		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
2253 
2254 		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2255 			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2256 		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2257 			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2258 		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2259 			hotplug_en |= HDMID_HOTPLUG_INT_EN;
2260 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
2261 			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2262 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2263 			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2264 		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2265 			hotplug_en |= CRT_HOTPLUG_INT_EN;
2266 			hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2267 		}
2268 
2269 		/* Ignore TV since it's buggy */
2270 
2271 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2272 	}
2273 
2274 	intel_opregion_enable_asle(dev);
2275 
2276 	return 0;
2277 }
2278 
2279 static irqreturn_t i915_irq_handler(void *arg)
2280 {
2281 	struct drm_device *dev = (struct drm_device *) arg;
2282 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2283 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
2284 	u32 flip_mask =
2285 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2286 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
2287 	u32 flip[2] = {
2288 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT,
2289 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT
2290 	};
2291 	int pipe;
2292 
2293 	atomic_inc(&dev_priv->irq_received);
2294 
2295 	iir = I915_READ(IIR);
2296 	do {
2297 		bool irq_received = (iir & ~flip_mask) != 0;
2298 		bool blc_event = false;
2299 
2300 		/* Can't rely on pipestat interrupt bit in iir as it might
2301 		 * have been cleared after the pipestat interrupt was received.
2302 		 * It doesn't set the bit in iir again, but it still produces
2303 		 * interrupts (for non-MSI).
2304 		 */
2305 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2306 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2307 			i915_handle_error(dev, false);
2308 
2309 		for_each_pipe(pipe) {
2310 			int reg = PIPESTAT(pipe);
2311 			pipe_stats[pipe] = I915_READ(reg);
2312 
2313 			/* Clear the PIPE*STAT regs before the IIR */
2314 			if (pipe_stats[pipe] & 0x8000ffff) {
2315 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2316 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2317 							 pipe_name(pipe));
2318 				I915_WRITE(reg, pipe_stats[pipe]);
2319 				irq_received = true;
2320 			}
2321 		}
2322 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2323 
2324 		if (!irq_received)
2325 			break;
2326 
2327 		/* Consume port.  Then clear IIR or we'll miss events */
2328 		if ((I915_HAS_HOTPLUG(dev)) &&
2329 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
2330 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2331 
2332 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2333 				  hotplug_status);
2334 			if (hotplug_status & dev_priv->hotplug_supported_mask)
2335 				queue_work(dev_priv->wq,
2336 					   &dev_priv->hotplug_work);
2337 
2338 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2339 			POSTING_READ(PORT_HOTPLUG_STAT);
2340 		}
2341 
2342 		I915_WRITE(IIR, iir & ~flip_mask);
2343 		new_iir = I915_READ(IIR); /* Flush posted writes */
2344 
2345 		if (iir & I915_USER_INTERRUPT)
2346 			notify_ring(dev, &dev_priv->ring[RCS]);
2347 
2348 		for_each_pipe(pipe) {
2349 			int plane = pipe;
2350 			if (IS_MOBILE(dev))
2351 				plane = !plane;
2352 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
2353 			    drm_handle_vblank(dev, pipe)) {
2354 				if (iir & flip[plane]) {
2355 					intel_prepare_page_flip(dev, plane);
2356 					intel_finish_page_flip(dev, pipe);
2357 					flip_mask &= ~flip[plane];
2358 				}
2359 			}
2360 
2361 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2362 				blc_event = true;
2363 		}
2364 
2365 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2366 			intel_opregion_asle_intr(dev);
2367 
2368 		/* With MSI, interrupts are only generated when iir
2369 		 * transitions from zero to nonzero.  If another bit got
2370 		 * set while we were handling the existing iir bits, then
2371 		 * we would never get another interrupt.
2372 		 *
2373 		 * This is fine on non-MSI as well, as if we hit this path
2374 		 * we avoid exiting the interrupt handler only to generate
2375 		 * another one.
2376 		 *
2377 		 * Note that for MSI this could cause a stray interrupt report
2378 		 * if an interrupt landed in the time between writing IIR and
2379 		 * the posting read.  This should be rare enough to never
2380 		 * trigger the 99% of 100,000 interrupts test for disabling
2381 		 * stray interrupts.
2382 		 */
2383 		iir = new_iir;
2384 	} while (iir & ~flip_mask);
2385 
2386 	i915_update_dri1_breadcrumb(dev);
2387 }
2388 
static void i915_irq_uninstall(struct drm_device *dev)
2390 {
2391 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2392 	int pipe;
2393 
2394 	if (I915_HAS_HOTPLUG(dev)) {
2395 		I915_WRITE(PORT_HOTPLUG_EN, 0);
2396 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2397 	}
2398 
2399 	I915_WRITE16(HWSTAM, 0xffff);
2400 	for_each_pipe(pipe) {
2401 		/* Clear enable bits; then clear status bits */
2402 		I915_WRITE(PIPESTAT(pipe), 0);
2403 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
2404 	}
2405 	I915_WRITE(IMR, 0xffffffff);
2406 	I915_WRITE(IER, 0x0);
2407 
2408 	I915_WRITE(IIR, I915_READ(IIR));
2409 }
2410 
static void i965_irq_preinstall(struct drm_device *dev)
2412 {
2413 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2414 	int pipe;
2415 
2416 	atomic_set(&dev_priv->irq_received, 0);
2417 
2418 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2419 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2420 
2421 	I915_WRITE(HWSTAM, 0xeffe);
2422 	for_each_pipe(pipe)
2423 		I915_WRITE(PIPESTAT(pipe), 0);
2424 	I915_WRITE(IMR, 0xffffffff);
2425 	I915_WRITE(IER, 0x0);
2426 	POSTING_READ(IER);
2427 }
2428 
2429 static int i965_irq_postinstall(struct drm_device *dev)
2430 {
2431 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2432 	u32 hotplug_en;
2433 	u32 enable_mask;
2434 	u32 error_mask;
2435 
2436 	/* Unmask the interrupts that we always want on. */
2437 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
2438 			       I915_DISPLAY_PORT_INTERRUPT |
2439 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2440 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2441 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2442 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2443 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2444 
2445 	enable_mask = ~dev_priv->irq_mask;
2446 	enable_mask |= I915_USER_INTERRUPT;
2447 
2448 	if (IS_G4X(dev))
2449 		enable_mask |= I915_BSD_USER_INTERRUPT;
2450 
2451 	dev_priv->pipestat[0] = 0;
2452 	dev_priv->pipestat[1] = 0;
2453 
	/*
	 * Enable some error detection.  Note that the instruction error
	 * mask bit is reserved, so we leave it masked.
	 */
2458 	if (IS_G4X(dev)) {
2459 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
2460 			       GM45_ERROR_MEM_PRIV |
2461 			       GM45_ERROR_CP_PRIV |
2462 			       I915_ERROR_MEMORY_REFRESH);
2463 	} else {
2464 		error_mask = ~(I915_ERROR_PAGE_TABLE |
2465 			       I915_ERROR_MEMORY_REFRESH);
2466 	}
2467 	I915_WRITE(EMR, error_mask);
2468 
2469 	I915_WRITE(IMR, dev_priv->irq_mask);
2470 	I915_WRITE(IER, enable_mask);
2471 	POSTING_READ(IER);
2472 
2473 	/* Note HDMI and DP share hotplug bits */
2474 	hotplug_en = 0;
2475 	if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
2476 		hotplug_en |= HDMIB_HOTPLUG_INT_EN;
2477 	if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
2478 		hotplug_en |= HDMIC_HOTPLUG_INT_EN;
2479 	if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
2480 		hotplug_en |= HDMID_HOTPLUG_INT_EN;
2481 	if (IS_G4X(dev)) {
2482 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
2483 			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2484 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X)
2485 			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2486 	} else {
2487 		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965)
2488 			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2489 		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965)
2490 			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
2491 	}
2492 	if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
2493 		hotplug_en |= CRT_HOTPLUG_INT_EN;
2494 
		/*
		 * Programming the CRT detection parameters tends to
		 * generate a spurious hotplug event about three seconds
		 * later.  So just do it once.
		 */
2499 		if (IS_G4X(dev))
2500 			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
2501 		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
2502 	}
2503 
2504 	/* Ignore TV since it's buggy */
2505 
2506 	I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
2507 
2508 	intel_opregion_enable_asle(dev);
2509 
2510 	return 0;
2511 }
2512 
2513 static irqreturn_t i965_irq_handler(void *arg)
2514 {
2515 	struct drm_device *dev = (struct drm_device *) arg;
2516 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2517 	u32 iir, new_iir;
2518 	u32 pipe_stats[I915_MAX_PIPES];
2519 	int irq_received;
2520 	int pipe;
2521 
2522 	atomic_inc(&dev_priv->irq_received);
2523 
2524 	iir = I915_READ(IIR);
2525 
2526 	for (;;) {
2527 		bool blc_event = false;
2528 
2529 		irq_received = iir != 0;
2530 
2531 		/* Can't rely on pipestat interrupt bit in iir as it might
2532 		 * have been cleared after the pipestat interrupt was received.
2533 		 * It doesn't set the bit in iir again, but it still produces
2534 		 * interrupts (for non-MSI).
2535 		 */
2536 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
2537 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
2538 			i915_handle_error(dev, false);
2539 
2540 		for_each_pipe(pipe) {
2541 			int reg = PIPESTAT(pipe);
2542 			pipe_stats[pipe] = I915_READ(reg);
2543 
2544 			/*
2545 			 * Clear the PIPE*STAT regs before the IIR
2546 			 */
2547 			if (pipe_stats[pipe] & 0x8000ffff) {
2548 				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2549 					DRM_DEBUG_DRIVER("pipe %c underrun\n",
2550 							 pipe_name(pipe));
2551 				I915_WRITE(reg, pipe_stats[pipe]);
2552 				irq_received = 1;
2553 			}
2554 		}
2555 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
2556 
2557 		if (!irq_received)
2558 			break;
2559 
2560 		/* Consume port.  Then clear IIR or we'll miss events */
2561 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
2562 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2563 
2564 			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
2565 				  hotplug_status);
2566 			if (hotplug_status & dev_priv->hotplug_supported_mask)
2567 				queue_work(dev_priv->wq,
2568 					   &dev_priv->hotplug_work);
2569 
2570 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2571 			I915_READ(PORT_HOTPLUG_STAT);
2572 		}
2573 
2574 		I915_WRITE(IIR, iir);
2575 		new_iir = I915_READ(IIR); /* Flush posted writes */
2576 
2577 		if (iir & I915_USER_INTERRUPT)
2578 			notify_ring(dev, &dev_priv->ring[RCS]);
2579 		if (iir & I915_BSD_USER_INTERRUPT)
2580 			notify_ring(dev, &dev_priv->ring[VCS]);
2581 
2582 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
2583 			intel_prepare_page_flip(dev, 0);
2584 
2585 		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
2586 			intel_prepare_page_flip(dev, 1);
2587 
2588 		for_each_pipe(pipe) {
2589 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2590 			    drm_handle_vblank(dev, pipe)) {
2591 				i915_pageflip_stall_check(dev, pipe);
2592 				intel_finish_page_flip(dev, pipe);
2593 			}
2594 
2595 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
2596 				blc_event = true;
2597 		}
2598 
2599 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
2600 			intel_opregion_asle_intr(dev);
2601 
2602 		/* With MSI, interrupts are only generated when iir
2603 		 * transitions from zero to nonzero.  If another bit got
2604 		 * set while we were handling the existing iir bits, then
2605 		 * we would never get another interrupt.
2606 		 *
2607 		 * This is fine on non-MSI as well, as if we hit this path
2608 		 * we avoid exiting the interrupt handler only to generate
2609 		 * another one.
2610 		 *
2611 		 * Note that for MSI this could cause a stray interrupt report
2612 		 * if an interrupt landed in the time between writing IIR and
2613 		 * the posting read.  This should be rare enough to never
2614 		 * trigger the 99% of 100,000 interrupts test for disabling
2615 		 * stray interrupts.
2616 		 */
2617 		iir = new_iir;
2618 	}
2619 
2620 	i915_update_dri1_breadcrumb(dev);
2621 }
2622 
static void i965_irq_uninstall(struct drm_device *dev)
2624 {
2625 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2626 	int pipe;
2627 
2628 	if (!dev_priv)
2629 		return;
2630 
2631 	I915_WRITE(PORT_HOTPLUG_EN, 0);
2632 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2633 
2634 	I915_WRITE(HWSTAM, 0xffffffff);
2635 	for_each_pipe(pipe)
2636 		I915_WRITE(PIPESTAT(pipe), 0);
2637 	I915_WRITE(IMR, 0xffffffff);
2638 	I915_WRITE(IER, 0x0);
2639 
2640 	for_each_pipe(pipe)
2641 		I915_WRITE(PIPESTAT(pipe),
2642 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
2643 	I915_WRITE(IIR, I915_READ(IIR));
2644 }
2645 
2646 void intel_irq_init(struct drm_device *dev)
2647 {
2648 	struct drm_i915_private *dev_priv = dev->dev_private;
2649 
2650 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
2651 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
2652 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
2653 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
2654 
2655 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
2656 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
2657 	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
2658 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
2659 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
2660 	}
2661 
2662 	if (drm_core_check_feature(dev, DRIVER_MODESET))
2663 		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
2664 	else
2665 		dev->driver->get_vblank_timestamp = NULL;
2666 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
2667 
2668 	if (IS_VALLEYVIEW(dev)) {
2669 		dev->driver->irq_handler = valleyview_irq_handler;
2670 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
2671 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
2672 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
2673 		dev->driver->enable_vblank = valleyview_enable_vblank;
2674 		dev->driver->disable_vblank = valleyview_disable_vblank;
2675 	} else if (IS_IVYBRIDGE(dev)) {
		/* Share preinstall & uninstall handlers with ILK/SNB */
2677 		dev->driver->irq_handler = ivybridge_irq_handler;
2678 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2679 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2680 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2681 		dev->driver->enable_vblank = ivybridge_enable_vblank;
2682 		dev->driver->disable_vblank = ivybridge_disable_vblank;
2683 	} else if (IS_HASWELL(dev)) {
		/* Share interrupt handling with IVB */
2685 		dev->driver->irq_handler = ivybridge_irq_handler;
2686 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2687 		dev->driver->irq_postinstall = ivybridge_irq_postinstall;
2688 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2689 		dev->driver->enable_vblank = ivybridge_enable_vblank;
2690 		dev->driver->disable_vblank = ivybridge_disable_vblank;
2691 	} else if (HAS_PCH_SPLIT(dev)) {
2692 		dev->driver->irq_handler = ironlake_irq_handler;
2693 		dev->driver->irq_preinstall = ironlake_irq_preinstall;
2694 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
2695 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
2696 		dev->driver->enable_vblank = ironlake_enable_vblank;
2697 		dev->driver->disable_vblank = ironlake_disable_vblank;
2698 	} else {
2699 		if (INTEL_INFO(dev)->gen == 2) {
2700 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
2701 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
2702 			dev->driver->irq_handler = i8xx_irq_handler;
2703 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
2704 		} else if (INTEL_INFO(dev)->gen == 3) {
2705 			dev->driver->irq_preinstall = i915_irq_preinstall;
2706 			dev->driver->irq_postinstall = i915_irq_postinstall;
2707 			dev->driver->irq_uninstall = i915_irq_uninstall;
2708 			dev->driver->irq_handler = i915_irq_handler;
2709 		} else {
2710 			dev->driver->irq_preinstall = i965_irq_preinstall;
2711 			dev->driver->irq_postinstall = i965_irq_postinstall;
2712 			dev->driver->irq_uninstall = i965_irq_uninstall;
2713 			dev->driver->irq_handler = i965_irq_handler;
2714 		}
2715 		dev->driver->enable_vblank = i915_enable_vblank;
2716 		dev->driver->disable_vblank = i915_disable_vblank;
2717 	}
2718 }
2719
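
/*
 * Illustrative sketch (not compiled): intel_irq_init() only fills in the
 * driver vtable; the DRM core invokes the hooks later.  Driver load looks
 * roughly like the (made-up) helper below: drm_irq_install() calls
 * irq_preinstall, wires up irq_handler, then calls irq_postinstall, and
 * drm_irq_uninstall() calls irq_uninstall on teardown.
 */
#if 0
static int i915_irq_setup_example(struct drm_device *dev)
{
	int ret;

	intel_irq_init(dev);		/* pick the per-platform hooks */
	ret = drm_irq_install(dev);	/* -> irq_preinstall, irq_postinstall */
	if (ret)
		return ret;

	/* ... normal operation: irq_handler runs for each interrupt ... */

	drm_irq_uninstall(dev);		/* -> irq_uninstall */
	return 0;
}
#endif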