xref: /dragonfly/sys/dev/drm/i915/i915_irq.c (revision f1e2e235)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #define KBUILD_MODNAME	"i915"
30 
31 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
32 
33 #include <linux/sysrq.h>
34 #include <linux/slab.h>
35 #include <linux/circ_buf.h>
36 #include <drm/drmP.h>
37 #include <drm/i915_drm.h>
38 #include "i915_drv.h"
39 #include "i915_trace.h"
40 #include "intel_drv.h"
41 
42 /**
43  * DOC: interrupt handling
44  *
45  * These functions provide the basic support for enabling and disabling the
46  * interrupt handling support. There's a lot more functionality in i915_irq.c
47  * and related files, but that will be described in separate chapters.
48  */
49 
50 static const u32 hpd_ilk[HPD_NUM_PINS] = {
51 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
52 };
53 
54 static const u32 hpd_ivb[HPD_NUM_PINS] = {
55 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
56 };
57 
58 static const u32 hpd_bdw[HPD_NUM_PINS] = {
59 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
60 };
61 
62 static const u32 hpd_ibx[HPD_NUM_PINS] = {
63 	[HPD_CRT] = SDE_CRT_HOTPLUG,
64 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
65 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
66 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
67 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
68 };
69 
70 static const u32 hpd_cpt[HPD_NUM_PINS] = {
71 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
72 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
73 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
74 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
75 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
76 };
77 
78 static const u32 hpd_spt[HPD_NUM_PINS] = {
79 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
80 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
81 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
82 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
83 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
84 };
85 
86 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
87 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
88 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
89 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
90 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
91 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
92 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
93 };
94 
95 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
96 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
97 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
98 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
99 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
100 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
101 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
102 };
103 
104 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
105 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
106 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
107 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
108 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
109 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
110 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
111 };
112 
113 /* BXT hpd list */
114 static const u32 hpd_bxt[HPD_NUM_PINS] = {
115 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
116 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
117 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
118 };
119 
120 /* IIR can theoretically queue up two events. Be paranoid. */
121 #define GEN8_IRQ_RESET_NDX(type, which) do { \
122 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
123 	POSTING_READ(GEN8_##type##_IMR(which)); \
124 	I915_WRITE(GEN8_##type##_IER(which), 0); \
125 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
126 	POSTING_READ(GEN8_##type##_IIR(which)); \
127 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
128 	POSTING_READ(GEN8_##type##_IIR(which)); \
129 } while (0)
130 
131 #define GEN5_IRQ_RESET(type) do { \
132 	I915_WRITE(type##IMR, 0xffffffff); \
133 	POSTING_READ(type##IMR); \
134 	I915_WRITE(type##IER, 0); \
135 	I915_WRITE(type##IIR, 0xffffffff); \
136 	POSTING_READ(type##IIR); \
137 	I915_WRITE(type##IIR, 0xffffffff); \
138 	POSTING_READ(type##IIR); \
139 } while (0)
140 
141 /*
142  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
143  */
144 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
145 				    i915_reg_t reg)
146 {
147 	u32 val = I915_READ(reg);
148 
149 	if (val == 0)
150 		return;
151 
152 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
153 	     i915_mmio_reg_offset(reg), val);
154 	I915_WRITE(reg, 0xffffffff);
155 	POSTING_READ(reg);
156 	I915_WRITE(reg, 0xffffffff);
157 	POSTING_READ(reg);
158 }
159 
160 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
161 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
162 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
163 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
164 	POSTING_READ(GEN8_##type##_IMR(which)); \
165 } while (0)
166 
167 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
168 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
169 	I915_WRITE(type##IER, (ier_val)); \
170 	I915_WRITE(type##IMR, (imr_val)); \
171 	POSTING_READ(type##IMR); \
172 } while (0)
173 
174 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
175 
176 /* For display hotplug interrupt */
177 static inline void
178 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
179 				     uint32_t mask,
180 				     uint32_t bits)
181 {
182 	uint32_t val;
183 
184 	assert_spin_locked(&dev_priv->irq_lock);
185 	WARN_ON(bits & ~mask);
186 
187 	val = I915_READ(PORT_HOTPLUG_EN);
188 	val &= ~mask;
189 	val |= bits;
190 	I915_WRITE(PORT_HOTPLUG_EN, val);
191 }
192 
193 /**
194  * i915_hotplug_interrupt_update - update hotplug interrupt enable
195  * @dev_priv: driver private
196  * @mask: bits to update
197  * @bits: bits to enable
198  * NOTE: the HPD enable bits are modified both inside and outside
199  * of an interrupt context. To avoid that read-modify-write cycles
200  * interfer, these bits are protected by a spinlock. Since this
201  * function is usually not called from a context where the lock is
202  * held already, this function acquires the lock itself. A non-locking
203  * version is also available.
204  */
205 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
206 				   uint32_t mask,
207 				   uint32_t bits)
208 {
209 	spin_lock_irq(&dev_priv->irq_lock);
210 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
211 	spin_unlock_irq(&dev_priv->irq_lock);
212 }
213 
214 /**
215  * ilk_update_display_irq - update DEIMR
216  * @dev_priv: driver private
217  * @interrupt_mask: mask of interrupt bits to update
218  * @enabled_irq_mask: mask of interrupt bits to enable
219  */
220 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
221 			    uint32_t interrupt_mask,
222 			    uint32_t enabled_irq_mask)
223 {
224 	uint32_t new_val;
225 
226 	assert_spin_locked(&dev_priv->irq_lock);
227 
228 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
229 
230 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
231 		return;
232 
233 	new_val = dev_priv->irq_mask;
234 	new_val &= ~interrupt_mask;
235 	new_val |= (~enabled_irq_mask & interrupt_mask);
236 
237 	if (new_val != dev_priv->irq_mask) {
238 		dev_priv->irq_mask = new_val;
239 		I915_WRITE(DEIMR, dev_priv->irq_mask);
240 		POSTING_READ(DEIMR);
241 	}
242 }
243 
244 /**
245  * ilk_update_gt_irq - update GTIMR
246  * @dev_priv: driver private
247  * @interrupt_mask: mask of interrupt bits to update
248  * @enabled_irq_mask: mask of interrupt bits to enable
249  */
250 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
251 			      uint32_t interrupt_mask,
252 			      uint32_t enabled_irq_mask)
253 {
254 	assert_spin_locked(&dev_priv->irq_lock);
255 
256 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
257 
258 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
259 		return;
260 
261 	dev_priv->gt_irq_mask &= ~interrupt_mask;
262 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
263 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
264 	POSTING_READ(GTIMR);
265 }
266 
267 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
268 {
269 	ilk_update_gt_irq(dev_priv, mask, mask);
270 }
271 
272 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
273 {
274 	ilk_update_gt_irq(dev_priv, mask, 0);
275 }
276 
277 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
278 {
279 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
280 }
281 
282 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
283 {
284 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
285 }
286 
287 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
288 {
289 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
290 }
291 
292 /**
293  * snb_update_pm_irq - update GEN6_PMIMR
294  * @dev_priv: driver private
295  * @interrupt_mask: mask of interrupt bits to update
296  * @enabled_irq_mask: mask of interrupt bits to enable
297  */
298 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
299 			      uint32_t interrupt_mask,
300 			      uint32_t enabled_irq_mask)
301 {
302 	uint32_t new_val;
303 
304 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
305 
306 	assert_spin_locked(&dev_priv->irq_lock);
307 
308 	new_val = dev_priv->pm_irq_mask;
309 	new_val &= ~interrupt_mask;
310 	new_val |= (~enabled_irq_mask & interrupt_mask);
311 
312 	if (new_val != dev_priv->pm_irq_mask) {
313 		dev_priv->pm_irq_mask = new_val;
314 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
315 		POSTING_READ(gen6_pm_imr(dev_priv));
316 	}
317 }
318 
319 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
320 {
321 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
322 		return;
323 
324 	snb_update_pm_irq(dev_priv, mask, mask);
325 }
326 
327 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
328 				  uint32_t mask)
329 {
330 	snb_update_pm_irq(dev_priv, mask, 0);
331 }
332 
333 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
334 {
335 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
336 		return;
337 
338 	__gen6_disable_pm_irq(dev_priv, mask);
339 }
340 
341 void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
342 {
343 	i915_reg_t reg = gen6_pm_iir(dev_priv);
344 
345 	spin_lock_irq(&dev_priv->irq_lock);
346 	I915_WRITE(reg, dev_priv->pm_rps_events);
347 	I915_WRITE(reg, dev_priv->pm_rps_events);
348 	POSTING_READ(reg);
349 	dev_priv->rps.pm_iir = 0;
350 	spin_unlock_irq(&dev_priv->irq_lock);
351 }
352 
353 void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
354 {
355 	spin_lock_irq(&dev_priv->irq_lock);
356 
357 	WARN_ON(dev_priv->rps.pm_iir);
358 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
359 	dev_priv->rps.interrupts_enabled = true;
360 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
361 				dev_priv->pm_rps_events);
362 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
363 
364 	spin_unlock_irq(&dev_priv->irq_lock);
365 }
366 
367 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
368 {
369 	return (mask & ~dev_priv->rps.pm_intr_keep);
370 }
371 
372 void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
373 {
374 	spin_lock_irq(&dev_priv->irq_lock);
375 	dev_priv->rps.interrupts_enabled = false;
376 	spin_unlock_irq(&dev_priv->irq_lock);
377 
378 	cancel_work_sync(&dev_priv->rps.work);
379 
380 	spin_lock_irq(&dev_priv->irq_lock);
381 
382 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
383 
384 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
385 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
386 				~dev_priv->pm_rps_events);
387 
388 	spin_unlock_irq(&dev_priv->irq_lock);
389 
390 	synchronize_irq(dev_priv->dev->irq);
391 }
392 
393 /**
394  * bdw_update_port_irq - update DE port interrupt
395  * @dev_priv: driver private
396  * @interrupt_mask: mask of interrupt bits to update
397  * @enabled_irq_mask: mask of interrupt bits to enable
398  */
399 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
400 				uint32_t interrupt_mask,
401 				uint32_t enabled_irq_mask)
402 {
403 	uint32_t new_val;
404 	uint32_t old_val;
405 
406 	assert_spin_locked(&dev_priv->irq_lock);
407 
408 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
409 
410 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
411 		return;
412 
413 	old_val = I915_READ(GEN8_DE_PORT_IMR);
414 
415 	new_val = old_val;
416 	new_val &= ~interrupt_mask;
417 	new_val |= (~enabled_irq_mask & interrupt_mask);
418 
419 	if (new_val != old_val) {
420 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
421 		POSTING_READ(GEN8_DE_PORT_IMR);
422 	}
423 }
424 
425 /**
426  * bdw_update_pipe_irq - update DE pipe interrupt
427  * @dev_priv: driver private
428  * @pipe: pipe whose interrupt to update
429  * @interrupt_mask: mask of interrupt bits to update
430  * @enabled_irq_mask: mask of interrupt bits to enable
431  */
432 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
433 			 enum i915_pipe pipe,
434 			 uint32_t interrupt_mask,
435 			 uint32_t enabled_irq_mask)
436 {
437 	uint32_t new_val;
438 
439 	assert_spin_locked(&dev_priv->irq_lock);
440 
441 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
442 
443 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
444 		return;
445 
446 	new_val = dev_priv->de_irq_mask[pipe];
447 	new_val &= ~interrupt_mask;
448 	new_val |= (~enabled_irq_mask & interrupt_mask);
449 
450 	if (new_val != dev_priv->de_irq_mask[pipe]) {
451 		dev_priv->de_irq_mask[pipe] = new_val;
452 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
453 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
454 	}
455 }
456 
457 /**
458  * ibx_display_interrupt_update - update SDEIMR
459  * @dev_priv: driver private
460  * @interrupt_mask: mask of interrupt bits to update
461  * @enabled_irq_mask: mask of interrupt bits to enable
462  */
463 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
464 				  uint32_t interrupt_mask,
465 				  uint32_t enabled_irq_mask)
466 {
467 	uint32_t sdeimr = I915_READ(SDEIMR);
468 	sdeimr &= ~interrupt_mask;
469 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
470 
471 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
472 
473 	assert_spin_locked(&dev_priv->irq_lock);
474 
475 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
476 		return;
477 
478 	I915_WRITE(SDEIMR, sdeimr);
479 	POSTING_READ(SDEIMR);
480 }
481 
482 static void
483 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
484 		       u32 enable_mask, u32 status_mask)
485 {
486 	i915_reg_t reg = PIPESTAT(pipe);
487 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
488 
489 	assert_spin_locked(&dev_priv->irq_lock);
490 	WARN_ON(!intel_irqs_enabled(dev_priv));
491 
492 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
493 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
494 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
495 		      pipe_name(pipe), enable_mask, status_mask))
496 		return;
497 
498 	if ((pipestat & enable_mask) == enable_mask)
499 		return;
500 
501 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
502 
503 	/* Enable the interrupt, clear any pending status */
504 	pipestat |= enable_mask | status_mask;
505 	I915_WRITE(reg, pipestat);
506 	POSTING_READ(reg);
507 }
508 
509 static void
510 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
511 		        u32 enable_mask, u32 status_mask)
512 {
513 	i915_reg_t reg = PIPESTAT(pipe);
514 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
515 
516 	assert_spin_locked(&dev_priv->irq_lock);
517 	WARN_ON(!intel_irqs_enabled(dev_priv));
518 
519 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
520 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
521 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
522 		      pipe_name(pipe), enable_mask, status_mask))
523 		return;
524 
525 	if ((pipestat & enable_mask) == 0)
526 		return;
527 
528 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
529 
530 	pipestat &= ~enable_mask;
531 	I915_WRITE(reg, pipestat);
532 	POSTING_READ(reg);
533 }
534 
535 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
536 {
537 	u32 enable_mask = status_mask << 16;
538 
539 	/*
540 	 * On pipe A we don't support the PSR interrupt yet,
541 	 * on pipe B and C the same bit MBZ.
542 	 */
543 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
544 		return 0;
545 	/*
546 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
547 	 * A the same bit is for perf counters which we don't use either.
548 	 */
549 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
550 		return 0;
551 
552 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
553 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
554 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
555 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
556 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
557 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
558 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
559 
560 	return enable_mask;
561 }
562 
563 void
564 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
565 		     u32 status_mask)
566 {
567 	u32 enable_mask;
568 
569 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
570 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
571 							   status_mask);
572 	else
573 		enable_mask = status_mask << 16;
574 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
575 }
576 
577 void
578 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
579 		      u32 status_mask)
580 {
581 	u32 enable_mask;
582 
583 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
584 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
585 							   status_mask);
586 	else
587 		enable_mask = status_mask << 16;
588 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
589 }
590 
591 /**
592  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
593  * @dev_priv: i915 device private
594  */
595 static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
596 {
597 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
598 		return;
599 
600 	spin_lock_irq(&dev_priv->irq_lock);
601 
602 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
603 	if (INTEL_GEN(dev_priv) >= 4)
604 		i915_enable_pipestat(dev_priv, PIPE_A,
605 				     PIPE_LEGACY_BLC_EVENT_STATUS);
606 
607 	spin_unlock_irq(&dev_priv->irq_lock);
608 }
609 
610 /*
611  * This timing diagram depicts the video signal in and
612  * around the vertical blanking period.
613  *
614  * Assumptions about the fictitious mode used in this example:
615  *  vblank_start >= 3
616  *  vsync_start = vblank_start + 1
617  *  vsync_end = vblank_start + 2
618  *  vtotal = vblank_start + 3
619  *
620  *           start of vblank:
621  *           latch double buffered registers
622  *           increment frame counter (ctg+)
623  *           generate start of vblank interrupt (gen4+)
624  *           |
625  *           |          frame start:
626  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
627  *           |          may be shifted forward 1-3 extra lines via PIPECONF
628  *           |          |
629  *           |          |  start of vsync:
630  *           |          |  generate vsync interrupt
631  *           |          |  |
632  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
633  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
634  * ----va---> <-----------------vb--------------------> <--------va-------------
635  *       |          |       <----vs----->                     |
636  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
637  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
638  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
639  *       |          |                                         |
640  *       last visible pixel                                   first visible pixel
641  *                  |                                         increment frame counter (gen3/4)
642  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
643  *
644  * x  = horizontal active
645  * _  = horizontal blanking
646  * hs = horizontal sync
647  * va = vertical active
648  * vb = vertical blanking
649  * vs = vertical sync
650  * vbs = vblank_start (number)
651  *
652  * Summary:
653  * - most events happen at the start of horizontal sync
654  * - frame start happens at the start of horizontal blank, 1-4 lines
655  *   (depending on PIPECONF settings) after the start of vblank
656  * - gen3/4 pixel and frame counter are synchronized with the start
657  *   of horizontal active on the first line of vertical active
658  */
659 
660 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
661 {
662 	/* Gen2 doesn't have a hardware frame counter */
663 	return 0;
664 }
665 
666 /* Called from drm generic code, passed a 'crtc', which
667  * we use as a pipe index
668  */
669 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
670 {
671 	struct drm_i915_private *dev_priv = dev->dev_private;
672 	i915_reg_t high_frame, low_frame;
673 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
674 	struct intel_crtc *intel_crtc =
675 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
676 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
677 
678 	htotal = mode->crtc_htotal;
679 	hsync_start = mode->crtc_hsync_start;
680 	vbl_start = mode->crtc_vblank_start;
681 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
682 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
683 
684 	/* Convert to pixel count */
685 	vbl_start *= htotal;
686 
687 	/* Start of vblank event occurs at start of hsync */
688 	vbl_start -= htotal - hsync_start;
689 
690 	high_frame = PIPEFRAME(pipe);
691 	low_frame = PIPEFRAMEPIXEL(pipe);
692 
693 	/*
694 	 * High & low register fields aren't synchronized, so make sure
695 	 * we get a low value that's stable across two reads of the high
696 	 * register.
697 	 */
698 	do {
699 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
700 		low   = I915_READ(low_frame);
701 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
702 	} while (high1 != high2);
703 
704 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
705 	pixel = low & PIPE_PIXEL_MASK;
706 	low >>= PIPE_FRAME_LOW_SHIFT;
707 
708 	/*
709 	 * The frame counter increments at beginning of active.
710 	 * Cook up a vblank counter by also checking the pixel
711 	 * counter against vblank start.
712 	 */
713 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
714 }
715 
716 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
717 {
718 	struct drm_i915_private *dev_priv = dev->dev_private;
719 
720 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
721 }
722 
723 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
724 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
725 {
726 	struct drm_device *dev = crtc->base.dev;
727 	struct drm_i915_private *dev_priv = dev->dev_private;
728 	const struct drm_display_mode *mode = &crtc->base.hwmode;
729 	enum i915_pipe pipe = crtc->pipe;
730 	int position, vtotal;
731 
732 	vtotal = mode->crtc_vtotal;
733 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
734 		vtotal /= 2;
735 
736 	if (IS_GEN2(dev_priv))
737 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
738 	else
739 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
740 
741 	/*
742 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
743 	 * read it just before the start of vblank.  So try it again
744 	 * so we don't accidentally end up spanning a vblank frame
745 	 * increment, causing the pipe_update_end() code to squak at us.
746 	 *
747 	 * The nature of this problem means we can't simply check the ISR
748 	 * bit and return the vblank start value; nor can we use the scanline
749 	 * debug register in the transcoder as it appears to have the same
750 	 * problem.  We may need to extend this to include other platforms,
751 	 * but so far testing only shows the problem on HSW.
752 	 */
753 	if (HAS_DDI(dev_priv) && !position) {
754 		int i, temp;
755 
756 		for (i = 0; i < 100; i++) {
757 			udelay(1);
758 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
759 				DSL_LINEMASK_GEN3;
760 			if (temp != position) {
761 				position = temp;
762 				break;
763 			}
764 		}
765 	}
766 
767 	/*
768 	 * See update_scanline_offset() for the details on the
769 	 * scanline_offset adjustment.
770 	 */
771 	return (position + crtc->scanline_offset) % vtotal;
772 }
773 
774 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
775 				    unsigned int flags, int *vpos, int *hpos,
776 				    ktime_t *stime, ktime_t *etime,
777 				    const struct drm_display_mode *mode)
778 {
779 	struct drm_i915_private *dev_priv = dev->dev_private;
780 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
781 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
782 	int position;
783 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
784 	bool in_vbl = true;
785 	int ret = 0;
786 	unsigned long irqflags;
787 
788 	if (WARN_ON(!mode->crtc_clock)) {
789 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
790 				 "pipe %c\n", pipe_name(pipe));
791 		return 0;
792 	}
793 
794 	htotal = mode->crtc_htotal;
795 	hsync_start = mode->crtc_hsync_start;
796 	vtotal = mode->crtc_vtotal;
797 	vbl_start = mode->crtc_vblank_start;
798 	vbl_end = mode->crtc_vblank_end;
799 
800 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
801 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
802 		vbl_end /= 2;
803 		vtotal /= 2;
804 	}
805 
806 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
807 
808 	/*
809 	 * Lock uncore.lock, as we will do multiple timing critical raw
810 	 * register reads, potentially with preemption disabled, so the
811 	 * following code must not block on uncore.lock.
812 	 */
813 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
814 
815 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
816 
817 	/* Get optional system timestamp before query. */
818 	if (stime)
819 		*stime = ktime_get();
820 
821 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
822 		/* No obvious pixelcount register. Only query vertical
823 		 * scanout position from Display scan line register.
824 		 */
825 		position = __intel_get_crtc_scanline(intel_crtc);
826 	} else {
827 		/* Have access to pixelcount since start of frame.
828 		 * We can split this into vertical and horizontal
829 		 * scanout position.
830 		 */
831 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
832 
833 		/* convert to pixel counts */
834 		vbl_start *= htotal;
835 		vbl_end *= htotal;
836 		vtotal *= htotal;
837 
838 		/*
839 		 * In interlaced modes, the pixel counter counts all pixels,
840 		 * so one field will have htotal more pixels. In order to avoid
841 		 * the reported position from jumping backwards when the pixel
842 		 * counter is beyond the length of the shorter field, just
843 		 * clamp the position the length of the shorter field. This
844 		 * matches how the scanline counter based position works since
845 		 * the scanline counter doesn't count the two half lines.
846 		 */
847 		if (position >= vtotal)
848 			position = vtotal - 1;
849 
850 		/*
851 		 * Start of vblank interrupt is triggered at start of hsync,
852 		 * just prior to the first active line of vblank. However we
853 		 * consider lines to start at the leading edge of horizontal
854 		 * active. So, should we get here before we've crossed into
855 		 * the horizontal active of the first line in vblank, we would
856 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
857 		 * always add htotal-hsync_start to the current pixel position.
858 		 */
859 		position = (position + htotal - hsync_start) % vtotal;
860 	}
861 
862 	/* Get optional system timestamp after query. */
863 	if (etime)
864 		*etime = ktime_get();
865 
866 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
867 
868 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
869 
870 	in_vbl = position >= vbl_start && position < vbl_end;
871 
872 	/*
873 	 * While in vblank, position will be negative
874 	 * counting up towards 0 at vbl_end. And outside
875 	 * vblank, position will be positive counting
876 	 * up since vbl_end.
877 	 */
878 	if (position >= vbl_start)
879 		position -= vbl_end;
880 	else
881 		position += vtotal - vbl_end;
882 
883 	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
884 		*vpos = position;
885 		*hpos = 0;
886 	} else {
887 		*vpos = position / htotal;
888 		*hpos = position - (*vpos * htotal);
889 	}
890 
891 	/* In vblank? */
892 	if (in_vbl)
893 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
894 
895 	return ret;
896 }
897 
898 int intel_get_crtc_scanline(struct intel_crtc *crtc)
899 {
900 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
901 	unsigned long irqflags;
902 	int position;
903 
904 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
905 	position = __intel_get_crtc_scanline(crtc);
906 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
907 
908 	return position;
909 }
910 
911 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
912 			      int *max_error,
913 			      struct timeval *vblank_time,
914 			      unsigned flags)
915 {
916 	struct drm_crtc *crtc;
917 
918 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
919 		DRM_ERROR("Invalid crtc %u\n", pipe);
920 		return -EINVAL;
921 	}
922 
923 	/* Get drm_crtc to timestamp: */
924 	crtc = intel_get_crtc_for_pipe(dev, pipe);
925 	if (crtc == NULL) {
926 		DRM_ERROR("Invalid crtc %u\n", pipe);
927 		return -EINVAL;
928 	}
929 
930 	if (!crtc->hwmode.crtc_clock) {
931 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
932 		return -EBUSY;
933 	}
934 
935 	/* Helper routine in DRM core does all the work: */
936 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
937 						     vblank_time, flags,
938 						     &crtc->hwmode);
939 }
940 
941 static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
942 {
943 	u32 busy_up, busy_down, max_avg, min_avg;
944 	u8 new_delay;
945 
946 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
947 
948 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
949 
950 	new_delay = dev_priv->ips.cur_delay;
951 
952 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
953 	busy_up = I915_READ(RCPREVBSYTUPAVG);
954 	busy_down = I915_READ(RCPREVBSYTDNAVG);
955 	max_avg = I915_READ(RCBMAXAVG);
956 	min_avg = I915_READ(RCBMINAVG);
957 
958 	/* Handle RCS change request from hw */
959 	if (busy_up > max_avg) {
960 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
961 			new_delay = dev_priv->ips.cur_delay - 1;
962 		if (new_delay < dev_priv->ips.max_delay)
963 			new_delay = dev_priv->ips.max_delay;
964 	} else if (busy_down < min_avg) {
965 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
966 			new_delay = dev_priv->ips.cur_delay + 1;
967 		if (new_delay > dev_priv->ips.min_delay)
968 			new_delay = dev_priv->ips.min_delay;
969 	}
970 
971 	if (ironlake_set_drps(dev_priv, new_delay))
972 		dev_priv->ips.cur_delay = new_delay;
973 
974 	lockmgr(&mchdev_lock, LK_RELEASE);
975 
976 	return;
977 }
978 
979 static void notify_ring(struct intel_engine_cs *engine)
980 {
981 	if (!intel_engine_initialized(engine))
982 		return;
983 
984 	trace_i915_gem_request_notify(engine);
985 	engine->user_interrupts++;
986 
987 	wake_up_all(&engine->irq_queue);
988 }
989 
990 static void vlv_c0_read(struct drm_i915_private *dev_priv,
991 			struct intel_rps_ei *ei)
992 {
993 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
994 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
995 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
996 }
997 
998 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
999 			 const struct intel_rps_ei *old,
1000 			 const struct intel_rps_ei *now,
1001 			 int threshold)
1002 {
1003 	u64 time, c0;
1004 	unsigned int mul = 100;
1005 
1006 	if (old->cz_clock == 0)
1007 		return false;
1008 
1009 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1010 		mul <<= 8;
1011 
1012 	time = now->cz_clock - old->cz_clock;
1013 	time *= threshold * dev_priv->czclk_freq;
1014 
1015 	/* Workload can be split between render + media, e.g. SwapBuffers
1016 	 * being blitted in X after being rendered in mesa. To account for
1017 	 * this we need to combine both engines into our activity counter.
1018 	 */
1019 	c0 = now->render_c0 - old->render_c0;
1020 	c0 += now->media_c0 - old->media_c0;
1021 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1022 
1023 	return c0 >= time;
1024 }
1025 
1026 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1027 {
1028 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1029 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1030 }
1031 
1032 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1033 {
1034 	struct intel_rps_ei now;
1035 	u32 events = 0;
1036 
1037 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1038 		return 0;
1039 
1040 	vlv_c0_read(dev_priv, &now);
1041 	if (now.cz_clock == 0)
1042 		return 0;
1043 
1044 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1045 		if (!vlv_c0_above(dev_priv,
1046 				  &dev_priv->rps.down_ei, &now,
1047 				  dev_priv->rps.down_threshold))
1048 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1049 		dev_priv->rps.down_ei = now;
1050 	}
1051 
1052 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1053 		if (vlv_c0_above(dev_priv,
1054 				 &dev_priv->rps.up_ei, &now,
1055 				 dev_priv->rps.up_threshold))
1056 			events |= GEN6_PM_RP_UP_THRESHOLD;
1057 		dev_priv->rps.up_ei = now;
1058 	}
1059 
1060 	return events;
1061 }
1062 
1063 static bool any_waiters(struct drm_i915_private *dev_priv)
1064 {
1065 	struct intel_engine_cs *engine;
1066 
1067 	for_each_engine(engine, dev_priv)
1068 		if (engine->irq_refcount)
1069 			return true;
1070 
1071 	return false;
1072 }
1073 
1074 static void gen6_pm_rps_work(struct work_struct *work)
1075 {
1076 	struct drm_i915_private *dev_priv =
1077 		container_of(work, struct drm_i915_private, rps.work);
1078 	bool client_boost;
1079 	int new_delay, adj, min, max;
1080 	u32 pm_iir;
1081 
1082 	spin_lock_irq(&dev_priv->irq_lock);
1083 	/* Speed up work cancelation during disabling rps interrupts. */
1084 	if (!dev_priv->rps.interrupts_enabled) {
1085 		spin_unlock_irq(&dev_priv->irq_lock);
1086 		return;
1087 	}
1088 
1089 	/*
1090 	 * The RPS work is synced during runtime suspend, we don't require a
1091 	 * wakeref. TODO: instead of disabling the asserts make sure that we
1092 	 * always hold an RPM reference while the work is running.
1093 	 */
1094 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1095 
1096 	pm_iir = dev_priv->rps.pm_iir;
1097 	dev_priv->rps.pm_iir = 0;
1098 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1099 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1100 	client_boost = dev_priv->rps.client_boost;
1101 	dev_priv->rps.client_boost = false;
1102 	spin_unlock_irq(&dev_priv->irq_lock);
1103 
1104 	/* Make sure we didn't queue anything we're not going to process. */
1105 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1106 
1107 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1108 		goto out;
1109 
1110 	mutex_lock(&dev_priv->rps.hw_lock);
1111 
1112 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1113 
1114 	adj = dev_priv->rps.last_adj;
1115 	new_delay = dev_priv->rps.cur_freq;
1116 	min = dev_priv->rps.min_freq_softlimit;
1117 	max = dev_priv->rps.max_freq_softlimit;
1118 
1119 	if (client_boost) {
1120 		new_delay = dev_priv->rps.max_freq_softlimit;
1121 		adj = 0;
1122 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1123 		if (adj > 0)
1124 			adj *= 2;
1125 		else /* CHV needs even encode values */
1126 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1127 		/*
1128 		 * For better performance, jump directly
1129 		 * to RPe if we're below it.
1130 		 */
1131 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1132 			new_delay = dev_priv->rps.efficient_freq;
1133 			adj = 0;
1134 		}
1135 	} else if (any_waiters(dev_priv)) {
1136 		adj = 0;
1137 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1138 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1139 			new_delay = dev_priv->rps.efficient_freq;
1140 		else
1141 			new_delay = dev_priv->rps.min_freq_softlimit;
1142 		adj = 0;
1143 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1144 		if (adj < 0)
1145 			adj *= 2;
1146 		else /* CHV needs even encode values */
1147 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1148 	} else { /* unknown event */
1149 		adj = 0;
1150 	}
1151 
1152 	dev_priv->rps.last_adj = adj;
1153 
1154 	/* sysfs frequency interfaces may have snuck in while servicing the
1155 	 * interrupt
1156 	 */
1157 	new_delay += adj;
1158 	new_delay = clamp_t(int, new_delay, min, max);
1159 
1160 	intel_set_rps(dev_priv, new_delay);
1161 
1162 	mutex_unlock(&dev_priv->rps.hw_lock);
1163 out:
1164 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1165 }
1166 
1167 
1168 /**
1169  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1170  * occurred.
1171  * @work: workqueue struct
1172  *
1173  * Doesn't actually do anything except notify userspace. As a consequence of
1174  * this event, userspace should try to remap the bad rows since statistically
1175  * it is likely the same row is more likely to go bad again.
1176  */
1177 static void ivybridge_parity_work(struct work_struct *work)
1178 {
1179 	struct drm_i915_private *dev_priv =
1180 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1181 	u32 error_status, row, bank, subbank;
1182 	char *parity_event[6];
1183 	uint32_t misccpctl;
1184 	uint8_t slice = 0;
1185 
1186 	/* We must turn off DOP level clock gating to access the L3 registers.
1187 	 * In order to prevent a get/put style interface, acquire struct mutex
1188 	 * any time we access those registers.
1189 	 */
1190 	mutex_lock(&dev_priv->dev->struct_mutex);
1191 
1192 	/* If we've screwed up tracking, just let the interrupt fire again */
1193 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1194 		goto out;
1195 
1196 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1197 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1198 	POSTING_READ(GEN7_MISCCPCTL);
1199 
1200 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1201 		i915_reg_t reg;
1202 
1203 		slice--;
1204 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1205 			break;
1206 
1207 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1208 
1209 		reg = GEN7_L3CDERRST1(slice);
1210 
1211 		error_status = I915_READ(reg);
1212 		row = GEN7_PARITY_ERROR_ROW(error_status);
1213 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1214 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1215 
1216 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1217 		POSTING_READ(reg);
1218 
1219 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1220 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1221 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1222 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1223 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1224 		parity_event[5] = NULL;
1225 
1226 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1227 				   KOBJ_CHANGE, parity_event);
1228 
1229 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1230 			  slice, row, bank, subbank);
1231 
1232 		kfree(parity_event[4]);
1233 		kfree(parity_event[3]);
1234 		kfree(parity_event[2]);
1235 		kfree(parity_event[1]);
1236 	}
1237 
1238 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1239 
1240 out:
1241 	WARN_ON(dev_priv->l3_parity.which_slice);
1242 	spin_lock_irq(&dev_priv->irq_lock);
1243 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1244 	spin_unlock_irq(&dev_priv->irq_lock);
1245 
1246 	mutex_unlock(&dev_priv->dev->struct_mutex);
1247 }
1248 
1249 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1250 					       u32 iir)
1251 {
1252 	if (!HAS_L3_DPF(dev_priv))
1253 		return;
1254 
1255 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1256 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1257 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1258 
1259 	iir &= GT_PARITY_ERROR(dev_priv);
1260 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1261 		dev_priv->l3_parity.which_slice |= 1 << 1;
1262 
1263 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1264 		dev_priv->l3_parity.which_slice |= 1 << 0;
1265 
1266 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1267 }
1268 
1269 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1270 			       u32 gt_iir)
1271 {
1272 	if (gt_iir &
1273 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1274 		notify_ring(&dev_priv->engine[RCS]);
1275 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1276 		notify_ring(&dev_priv->engine[VCS]);
1277 }
1278 
1279 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1280 			       u32 gt_iir)
1281 {
1282 
1283 	if (gt_iir &
1284 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1285 		notify_ring(&dev_priv->engine[RCS]);
1286 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1287 		notify_ring(&dev_priv->engine[VCS]);
1288 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1289 		notify_ring(&dev_priv->engine[BCS]);
1290 
1291 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1292 		      GT_BSD_CS_ERROR_INTERRUPT |
1293 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1294 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1295 
1296 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1297 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1298 }
1299 
1300 static __always_inline void
1301 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1302 {
1303 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1304 		notify_ring(engine);
1305 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1306 		tasklet_schedule(&engine->irq_tasklet);
1307 }
1308 
1309 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1310 				   u32 master_ctl,
1311 				   u32 gt_iir[4])
1312 {
1313 	irqreturn_t ret = IRQ_NONE;
1314 
1315 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1316 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1317 		if (gt_iir[0]) {
1318 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1319 			ret = IRQ_HANDLED;
1320 		} else
1321 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1322 	}
1323 
1324 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1325 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1326 		if (gt_iir[1]) {
1327 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1328 			ret = IRQ_HANDLED;
1329 		} else
1330 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1331 	}
1332 
1333 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1334 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1335 		if (gt_iir[3]) {
1336 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1337 			ret = IRQ_HANDLED;
1338 		} else
1339 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1340 	}
1341 
1342 	if (master_ctl & GEN8_GT_PM_IRQ) {
1343 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1344 		if (gt_iir[2] & dev_priv->pm_rps_events) {
1345 			I915_WRITE_FW(GEN8_GT_IIR(2),
1346 				      gt_iir[2] & dev_priv->pm_rps_events);
1347 			ret = IRQ_HANDLED;
1348 		} else
1349 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1350 	}
1351 
1352 	return ret;
1353 }
1354 
1355 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1356 				u32 gt_iir[4])
1357 {
1358 	if (gt_iir[0]) {
1359 		gen8_cs_irq_handler(&dev_priv->engine[RCS],
1360 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1361 		gen8_cs_irq_handler(&dev_priv->engine[BCS],
1362 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1363 	}
1364 
1365 	if (gt_iir[1]) {
1366 		gen8_cs_irq_handler(&dev_priv->engine[VCS],
1367 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1368 		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
1369 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1370 	}
1371 
1372 	if (gt_iir[3])
1373 		gen8_cs_irq_handler(&dev_priv->engine[VECS],
1374 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1375 
1376 	if (gt_iir[2] & dev_priv->pm_rps_events)
1377 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1378 }
1379 
1380 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1381 {
1382 	switch (port) {
1383 	case PORT_A:
1384 		return val & PORTA_HOTPLUG_LONG_DETECT;
1385 	case PORT_B:
1386 		return val & PORTB_HOTPLUG_LONG_DETECT;
1387 	case PORT_C:
1388 		return val & PORTC_HOTPLUG_LONG_DETECT;
1389 	default:
1390 		return false;
1391 	}
1392 }
1393 
1394 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1395 {
1396 	switch (port) {
1397 	case PORT_E:
1398 		return val & PORTE_HOTPLUG_LONG_DETECT;
1399 	default:
1400 		return false;
1401 	}
1402 }
1403 
1404 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1405 {
1406 	switch (port) {
1407 	case PORT_A:
1408 		return val & PORTA_HOTPLUG_LONG_DETECT;
1409 	case PORT_B:
1410 		return val & PORTB_HOTPLUG_LONG_DETECT;
1411 	case PORT_C:
1412 		return val & PORTC_HOTPLUG_LONG_DETECT;
1413 	case PORT_D:
1414 		return val & PORTD_HOTPLUG_LONG_DETECT;
1415 	default:
1416 		return false;
1417 	}
1418 }
1419 
1420 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1421 {
1422 	switch (port) {
1423 	case PORT_A:
1424 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1425 	default:
1426 		return false;
1427 	}
1428 }
1429 
1430 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1431 {
1432 	switch (port) {
1433 	case PORT_B:
1434 		return val & PORTB_HOTPLUG_LONG_DETECT;
1435 	case PORT_C:
1436 		return val & PORTC_HOTPLUG_LONG_DETECT;
1437 	case PORT_D:
1438 		return val & PORTD_HOTPLUG_LONG_DETECT;
1439 	default:
1440 		return false;
1441 	}
1442 }
1443 
1444 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1445 {
1446 	switch (port) {
1447 	case PORT_B:
1448 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1449 	case PORT_C:
1450 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1451 	case PORT_D:
1452 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1453 	default:
1454 		return false;
1455 	}
1456 }
1457 
1458 /*
1459  * Get a bit mask of pins that have triggered, and which ones may be long.
1460  * This can be called multiple times with the same masks to accumulate
1461  * hotplug detection results from several registers.
1462  *
1463  * Note that the caller is expected to zero out the masks initially.
1464  */
1465 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1466 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1467 			     const u32 hpd[HPD_NUM_PINS],
1468 			     bool long_pulse_detect(enum port port, u32 val))
1469 {
1470 	enum port port;
1471 	int i;
1472 
1473 	for_each_hpd_pin(i) {
1474 		if ((hpd[i] & hotplug_trigger) == 0)
1475 			continue;
1476 
1477 		*pin_mask |= BIT(i);
1478 
1479 		if (!intel_hpd_pin_to_port(i, &port))
1480 			continue;
1481 
1482 		if (long_pulse_detect(port, dig_hotplug_reg))
1483 			*long_mask |= BIT(i);
1484 	}
1485 
1486 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1487 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1488 
1489 }
1490 
1491 static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1492 {
1493 	wake_up_all(&dev_priv->gmbus_wait_queue);
1494 }
1495 
1496 static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1497 {
1498 	wake_up_all(&dev_priv->gmbus_wait_queue);
1499 }
1500 
1501 #if defined(CONFIG_DEBUG_FS)
1502 static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1503 					 enum i915_pipe pipe,
1504 					 uint32_t crc0, uint32_t crc1,
1505 					 uint32_t crc2, uint32_t crc3,
1506 					 uint32_t crc4)
1507 {
1508 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1509 	struct intel_pipe_crc_entry *entry;
1510 	int head, tail;
1511 
1512 	spin_lock(&pipe_crc->lock);
1513 
1514 	if (!pipe_crc->entries) {
1515 		spin_unlock(&pipe_crc->lock);
1516 		DRM_DEBUG_KMS("spurious interrupt\n");
1517 		return;
1518 	}
1519 
1520 	head = pipe_crc->head;
1521 	tail = pipe_crc->tail;
1522 
1523 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1524 		spin_unlock(&pipe_crc->lock);
1525 		DRM_ERROR("CRC buffer overflowing\n");
1526 		return;
1527 	}
1528 
1529 	entry = &pipe_crc->entries[head];
1530 
1531 	entry->frame = dev_priv->dev->driver->get_vblank_counter(dev_priv->dev,
1532 								 pipe);
1533 	entry->crc[0] = crc0;
1534 	entry->crc[1] = crc1;
1535 	entry->crc[2] = crc2;
1536 	entry->crc[3] = crc3;
1537 	entry->crc[4] = crc4;
1538 
1539 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1540 	pipe_crc->head = head;
1541 
1542 	spin_unlock(&pipe_crc->lock);
1543 
1544 	wake_up_interruptible(&pipe_crc->wq);
1545 }
1546 #else
1547 static inline void
1548 display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1549 			     enum i915_pipe pipe,
1550 			     uint32_t crc0, uint32_t crc1,
1551 			     uint32_t crc2, uint32_t crc3,
1552 			     uint32_t crc4) {}
1553 #endif
1554 
1555 
1556 static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1557 				     enum i915_pipe pipe)
1558 {
1559 	display_pipe_crc_irq_handler(dev_priv, pipe,
1560 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1561 				     0, 0, 0, 0);
1562 }
1563 
1564 static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1565 				     enum i915_pipe pipe)
1566 {
1567 	display_pipe_crc_irq_handler(dev_priv, pipe,
1568 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1569 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1570 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1571 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1572 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1573 }
1574 
1575 static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1576 				      enum i915_pipe pipe)
1577 {
1578 	uint32_t res1, res2;
1579 
1580 	if (INTEL_GEN(dev_priv) >= 3)
1581 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1582 	else
1583 		res1 = 0;
1584 
1585 	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1586 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1587 	else
1588 		res2 = 0;
1589 
1590 	display_pipe_crc_irq_handler(dev_priv, pipe,
1591 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1592 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1593 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1594 				     res1, res2);
1595 }
1596 
1597 /* The RPS events need forcewake, so we add them to a work queue and mask their
1598  * IMR bits until the work is done. Other interrupts can be processed without
1599  * the work queue. */
1600 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1601 {
1602 	if (pm_iir & dev_priv->pm_rps_events) {
1603 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1604 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1605 		if (dev_priv->rps.interrupts_enabled) {
1606 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1607 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1608 		}
1609 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1610 	}
1611 
1612 	if (INTEL_INFO(dev_priv)->gen >= 8)
1613 		return;
1614 
1615 	if (HAS_VEBOX(dev_priv)) {
1616 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1617 			notify_ring(&dev_priv->engine[VECS]);
1618 
1619 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1620 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1621 	}
1622 }
1623 
1624 static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1625 				     enum i915_pipe pipe)
1626 {
1627 	bool ret;
1628 
1629 	ret = drm_handle_vblank(dev_priv->dev, pipe);
1630 	if (ret)
1631 		intel_finish_page_flip_mmio(dev_priv, pipe);
1632 
1633 	return ret;
1634 }
1635 
1636 static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1637 					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1638 {
1639 	int pipe;
1640 
1641 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1642 
1643 	if (!dev_priv->display_irqs_enabled) {
1644 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1645 		return;
1646 	}
1647 
1648 	for_each_pipe(dev_priv, pipe) {
1649 		i915_reg_t reg;
1650 		u32 mask, iir_bit = 0;
1651 
1652 		/*
1653 		 * PIPESTAT bits get signalled even when the interrupt is
1654 		 * disabled with the mask bits, and some of the status bits do
1655 		 * not generate interrupts at all (like the underrun bit). Hence
1656 		 * we need to be careful that we only handle what we want to
1657 		 * handle.
1658 		 */
1659 
1660 		/* fifo underruns are filterered in the underrun handler. */
1661 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1662 
1663 		switch (pipe) {
1664 		case PIPE_A:
1665 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1666 			break;
1667 		case PIPE_B:
1668 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1669 			break;
1670 		case PIPE_C:
1671 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1672 			break;
1673 		}
1674 		if (iir & iir_bit)
1675 			mask |= dev_priv->pipestat_irq_mask[pipe];
1676 
1677 		if (!mask)
1678 			continue;
1679 
1680 		reg = PIPESTAT(pipe);
1681 		mask |= PIPESTAT_INT_ENABLE_MASK;
1682 		pipe_stats[pipe] = I915_READ(reg) & mask;
1683 
1684 		/*
1685 		 * Clear the PIPE*STAT regs before the IIR
1686 		 */
1687 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1688 					PIPESTAT_INT_STATUS_MASK))
1689 			I915_WRITE(reg, pipe_stats[pipe]);
1690 	}
1691 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1692 }
1693 
1694 static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1695 					    u32 pipe_stats[I915_MAX_PIPES])
1696 {
1697 	enum i915_pipe pipe;
1698 
1699 	for_each_pipe(dev_priv, pipe) {
1700 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1701 		    intel_pipe_handle_vblank(dev_priv, pipe))
1702 			intel_check_page_flip(dev_priv, pipe);
1703 
1704 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1705 			intel_finish_page_flip_cs(dev_priv, pipe);
1706 
1707 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1708 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1709 
1710 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1711 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1712 	}
1713 
1714 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1715 		gmbus_irq_handler(dev_priv);
1716 }
1717 
1718 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1719 {
1720 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1721 
1722 	if (hotplug_status)
1723 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1724 
1725 	return hotplug_status;
1726 }
1727 
1728 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1729 				 u32 hotplug_status)
1730 {
1731 	u32 pin_mask = 0, long_mask = 0;
1732 
1733 	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1734 	    IS_CHERRYVIEW(dev_priv)) {
1735 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1736 
1737 		if (hotplug_trigger) {
1738 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1739 					   hotplug_trigger, hpd_status_g4x,
1740 					   i9xx_port_hotplug_long_detect);
1741 
1742 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1743 		}
1744 
1745 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1746 			dp_aux_irq_handler(dev_priv);
1747 	} else {
1748 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1749 
1750 		if (hotplug_trigger) {
1751 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1752 					   hotplug_trigger, hpd_status_i915,
1753 					   i9xx_port_hotplug_long_detect);
1754 			intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1755 		}
1756 	}
1757 }
1758 
1759 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1760 {
1761 	struct drm_device *dev = arg;
1762 	struct drm_i915_private *dev_priv = dev->dev_private;
1763 	irqreturn_t ret = IRQ_NONE;
1764 
1765 	if (!intel_irqs_enabled(dev_priv))
1766 		return IRQ_NONE;
1767 
1768 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1769 	disable_rpm_wakeref_asserts(dev_priv);
1770 
1771 	do {
1772 		u32 iir, gt_iir, pm_iir;
1773 		u32 pipe_stats[I915_MAX_PIPES] = {};
1774 		u32 hotplug_status = 0;
1775 		u32 ier = 0;
1776 
1777 		gt_iir = I915_READ(GTIIR);
1778 		pm_iir = I915_READ(GEN6_PMIIR);
1779 		iir = I915_READ(VLV_IIR);
1780 
1781 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1782 			break;
1783 
1784 		ret = IRQ_HANDLED;
1785 
1786 		/*
1787 		 * Theory on interrupt generation, based on empirical evidence:
1788 		 *
1789 		 * x = ((VLV_IIR & VLV_IER) ||
1790 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1791 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1792 		 *
1793 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1794 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1795 		 * guarantee the CPU interrupt will be raised again even if we
1796 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1797 		 * bits this time around.
1798 		 */
1799 		I915_WRITE(VLV_MASTER_IER, 0);
1800 		ier = I915_READ(VLV_IER);
1801 		I915_WRITE(VLV_IER, 0);
1802 
1803 		if (gt_iir)
1804 			I915_WRITE(GTIIR, gt_iir);
1805 		if (pm_iir)
1806 			I915_WRITE(GEN6_PMIIR, pm_iir);
1807 
1808 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1809 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1810 
1811 		/* Call regardless, as some status bits might not be
1812 		 * signalled in iir */
1813 		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1814 
1815 		/*
1816 		 * VLV_IIR is single buffered, and reflects the level
1817 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1818 		 */
1819 		if (iir)
1820 			I915_WRITE(VLV_IIR, iir);
1821 
1822 		I915_WRITE(VLV_IER, ier);
1823 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1824 		POSTING_READ(VLV_MASTER_IER);
1825 
1826 		if (gt_iir)
1827 			snb_gt_irq_handler(dev_priv, gt_iir);
1828 		if (pm_iir)
1829 			gen6_rps_irq_handler(dev_priv, pm_iir);
1830 
1831 		if (hotplug_status)
1832 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1833 
1834 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1835 	} while (0);
1836 
1837 	enable_rpm_wakeref_asserts(dev_priv);
1838 
1839 	return ret;
1840 }
1841 
1842 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1843 {
1844 	struct drm_device *dev = arg;
1845 	struct drm_i915_private *dev_priv = dev->dev_private;
1846 	irqreturn_t ret = IRQ_NONE;
1847 
1848 	if (!intel_irqs_enabled(dev_priv))
1849 		return IRQ_NONE;
1850 
1851 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1852 	disable_rpm_wakeref_asserts(dev_priv);
1853 
1854 	do {
1855 		u32 master_ctl, iir;
1856 		u32 gt_iir[4] = {};
1857 		u32 pipe_stats[I915_MAX_PIPES] = {};
1858 		u32 hotplug_status = 0;
1859 		u32 ier = 0;
1860 
1861 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1862 		iir = I915_READ(VLV_IIR);
1863 
1864 		if (master_ctl == 0 && iir == 0)
1865 			break;
1866 
1867 		ret = IRQ_HANDLED;
1868 
1869 		/*
1870 		 * Theory on interrupt generation, based on empirical evidence:
1871 		 *
1872 		 * x = ((VLV_IIR & VLV_IER) ||
1873 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1874 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1875 		 *
1876 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1877 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1878 		 * guarantee the CPU interrupt will be raised again even if we
1879 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1880 		 * bits this time around.
1881 		 */
1882 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1883 		ier = I915_READ(VLV_IER);
1884 		I915_WRITE(VLV_IER, 0);
1885 
1886 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1887 
1888 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1889 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1890 
1891 		/* Call regardless, as some status bits might not be
1892 		 * signalled in iir */
1893 		valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1894 
1895 		/*
1896 		 * VLV_IIR is single buffered, and reflects the level
1897 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1898 		 */
1899 		if (iir)
1900 			I915_WRITE(VLV_IIR, iir);
1901 
1902 		I915_WRITE(VLV_IER, ier);
1903 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1904 		POSTING_READ(GEN8_MASTER_IRQ);
1905 
1906 		gen8_gt_irq_handler(dev_priv, gt_iir);
1907 
1908 		if (hotplug_status)
1909 			i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1910 
1911 		valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1912 	} while (0);
1913 
1914 	enable_rpm_wakeref_asserts(dev_priv);
1915 
1916 	return ret;
1917 }
1918 
1919 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1920 				u32 hotplug_trigger,
1921 				const u32 hpd[HPD_NUM_PINS])
1922 {
1923 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1924 
1925 	/*
1926 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1927 	 * unless we touch the hotplug register, even if hotplug_trigger is
1928 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1929 	 * errors.
1930 	 */
1931 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1932 	if (!hotplug_trigger) {
1933 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1934 			PORTD_HOTPLUG_STATUS_MASK |
1935 			PORTC_HOTPLUG_STATUS_MASK |
1936 			PORTB_HOTPLUG_STATUS_MASK;
1937 		dig_hotplug_reg &= ~mask;
1938 	}
1939 
1940 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1941 	if (!hotplug_trigger)
1942 		return;
1943 
1944 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1945 			   dig_hotplug_reg, hpd,
1946 			   pch_port_hotplug_long_detect);
1947 
1948 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1949 }
1950 
1951 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1952 {
1953 	int pipe;
1954 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1955 
1956 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1957 
1958 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1959 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1960 			       SDE_AUDIO_POWER_SHIFT);
1961 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1962 				 port_name(port));
1963 	}
1964 
1965 	if (pch_iir & SDE_AUX_MASK)
1966 		dp_aux_irq_handler(dev_priv);
1967 
1968 	if (pch_iir & SDE_GMBUS)
1969 		gmbus_irq_handler(dev_priv);
1970 
1971 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1972 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1973 
1974 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1975 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1976 
1977 	if (pch_iir & SDE_POISON)
1978 		DRM_ERROR("PCH poison interrupt\n");
1979 
1980 	if (pch_iir & SDE_FDI_MASK)
1981 		for_each_pipe(dev_priv, pipe)
1982 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1983 					 pipe_name(pipe),
1984 					 I915_READ(FDI_RX_IIR(pipe)));
1985 
1986 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1987 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1988 
1989 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1990 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1991 
1992 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1993 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1994 
1995 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1996 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1997 }
1998 
1999 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2000 {
2001 	u32 err_int = I915_READ(GEN7_ERR_INT);
2002 	enum i915_pipe pipe;
2003 
2004 	if (err_int & ERR_INT_POISON)
2005 		DRM_ERROR("Poison interrupt\n");
2006 
2007 	for_each_pipe(dev_priv, pipe) {
2008 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2009 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2010 
2011 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2012 			if (IS_IVYBRIDGE(dev_priv))
2013 				ivb_pipe_crc_irq_handler(dev_priv, pipe);
2014 			else
2015 				hsw_pipe_crc_irq_handler(dev_priv, pipe);
2016 		}
2017 	}
2018 
2019 	I915_WRITE(GEN7_ERR_INT, err_int);
2020 }
2021 
2022 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2023 {
2024 	u32 serr_int = I915_READ(SERR_INT);
2025 
2026 	if (serr_int & SERR_INT_POISON)
2027 		DRM_ERROR("PCH poison interrupt\n");
2028 
2029 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2030 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2031 
2032 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2033 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2034 
2035 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2036 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2037 
2038 	I915_WRITE(SERR_INT, serr_int);
2039 }
2040 
2041 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2042 {
2043 	int pipe;
2044 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2045 
2046 	ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2047 
2048 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2049 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2050 			       SDE_AUDIO_POWER_SHIFT_CPT);
2051 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2052 				 port_name(port));
2053 	}
2054 
2055 	if (pch_iir & SDE_AUX_MASK_CPT)
2056 		dp_aux_irq_handler(dev_priv);
2057 
2058 	if (pch_iir & SDE_GMBUS_CPT)
2059 		gmbus_irq_handler(dev_priv);
2060 
2061 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2062 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2063 
2064 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2065 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2066 
2067 	if (pch_iir & SDE_FDI_MASK_CPT)
2068 		for_each_pipe(dev_priv, pipe)
2069 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2070 					 pipe_name(pipe),
2071 					 I915_READ(FDI_RX_IIR(pipe)));
2072 
2073 	if (pch_iir & SDE_ERROR_CPT)
2074 		cpt_serr_int_handler(dev_priv);
2075 }
2076 
2077 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2078 {
2079 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2080 		~SDE_PORTE_HOTPLUG_SPT;
2081 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2082 	u32 pin_mask = 0, long_mask = 0;
2083 
2084 	if (hotplug_trigger) {
2085 		u32 dig_hotplug_reg;
2086 
2087 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2088 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2089 
2090 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2091 				   dig_hotplug_reg, hpd_spt,
2092 				   spt_port_hotplug_long_detect);
2093 	}
2094 
2095 	if (hotplug2_trigger) {
2096 		u32 dig_hotplug_reg;
2097 
2098 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2099 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2100 
2101 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2102 				   dig_hotplug_reg, hpd_spt,
2103 				   spt_port_hotplug2_long_detect);
2104 	}
2105 
2106 	if (pin_mask)
2107 		intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2108 
2109 	if (pch_iir & SDE_GMBUS_CPT)
2110 		gmbus_irq_handler(dev_priv);
2111 }
2112 
2113 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2114 				u32 hotplug_trigger,
2115 				const u32 hpd[HPD_NUM_PINS])
2116 {
2117 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2118 
2119 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2120 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2121 
2122 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2123 			   dig_hotplug_reg, hpd,
2124 			   ilk_port_hotplug_long_detect);
2125 
2126 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2127 }
2128 
2129 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2130 				    u32 de_iir)
2131 {
2132 	enum i915_pipe pipe;
2133 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2134 
2135 	if (hotplug_trigger)
2136 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2137 
2138 	if (de_iir & DE_AUX_CHANNEL_A)
2139 		dp_aux_irq_handler(dev_priv);
2140 
2141 	if (de_iir & DE_GSE)
2142 		intel_opregion_asle_intr(dev_priv);
2143 
2144 	if (de_iir & DE_POISON)
2145 		DRM_ERROR("Poison interrupt\n");
2146 
2147 	for_each_pipe(dev_priv, pipe) {
2148 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2149 		    intel_pipe_handle_vblank(dev_priv, pipe))
2150 			intel_check_page_flip(dev_priv, pipe);
2151 
2152 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2153 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2154 
2155 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2156 			i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2157 
2158 		/* plane/pipes map 1:1 on ilk+ */
2159 		if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2160 			intel_finish_page_flip_cs(dev_priv, pipe);
2161 	}
2162 
2163 	/* check event from PCH */
2164 	if (de_iir & DE_PCH_EVENT) {
2165 		u32 pch_iir = I915_READ(SDEIIR);
2166 
2167 		if (HAS_PCH_CPT(dev_priv))
2168 			cpt_irq_handler(dev_priv, pch_iir);
2169 		else
2170 			ibx_irq_handler(dev_priv, pch_iir);
2171 
2172 		/* should clear PCH hotplug event before clear CPU irq */
2173 		I915_WRITE(SDEIIR, pch_iir);
2174 	}
2175 
2176 	if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2177 		ironlake_rps_change_irq_handler(dev_priv);
2178 }
2179 
2180 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2181 				    u32 de_iir)
2182 {
2183 	enum i915_pipe pipe;
2184 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2185 
2186 	if (hotplug_trigger)
2187 		ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2188 
2189 	if (de_iir & DE_ERR_INT_IVB)
2190 		ivb_err_int_handler(dev_priv);
2191 
2192 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2193 		dp_aux_irq_handler(dev_priv);
2194 
2195 	if (de_iir & DE_GSE_IVB)
2196 		intel_opregion_asle_intr(dev_priv);
2197 
2198 	for_each_pipe(dev_priv, pipe) {
2199 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2200 		    intel_pipe_handle_vblank(dev_priv, pipe))
2201 			intel_check_page_flip(dev_priv, pipe);
2202 
2203 		/* plane/pipes map 1:1 on ilk+ */
2204 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2205 			intel_finish_page_flip_cs(dev_priv, pipe);
2206 	}
2207 
2208 	/* check event from PCH */
2209 	if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2210 		u32 pch_iir = I915_READ(SDEIIR);
2211 
2212 		cpt_irq_handler(dev_priv, pch_iir);
2213 
2214 		/* clear PCH hotplug event before clear CPU irq */
2215 		I915_WRITE(SDEIIR, pch_iir);
2216 	}
2217 }
2218 
2219 /*
2220  * To handle irqs with the minimum potential races with fresh interrupts, we:
2221  * 1 - Disable Master Interrupt Control.
2222  * 2 - Find the source(s) of the interrupt.
2223  * 3 - Clear the Interrupt Identity bits (IIR).
2224  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2225  * 5 - Re-enable Master Interrupt Control.
2226  */
2227 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2228 {
2229 	struct drm_device *dev = arg;
2230 	struct drm_i915_private *dev_priv = dev->dev_private;
2231 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2232 	irqreturn_t ret = IRQ_NONE;
2233 
2234 	if (!intel_irqs_enabled(dev_priv))
2235 		return IRQ_NONE;
2236 
2237 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2238 	disable_rpm_wakeref_asserts(dev_priv);
2239 
2240 	/* disable master interrupt before clearing iir  */
2241 	de_ier = I915_READ(DEIER);
2242 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2243 	POSTING_READ(DEIER);
2244 
2245 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2246 	 * interrupts will will be stored on its back queue, and then we'll be
2247 	 * able to process them after we restore SDEIER (as soon as we restore
2248 	 * it, we'll get an interrupt if SDEIIR still has something to process
2249 	 * due to its back queue). */
2250 	if (!HAS_PCH_NOP(dev_priv)) {
2251 		sde_ier = I915_READ(SDEIER);
2252 		I915_WRITE(SDEIER, 0);
2253 		POSTING_READ(SDEIER);
2254 	}
2255 
2256 	/* Find, clear, then process each source of interrupt */
2257 
2258 	gt_iir = I915_READ(GTIIR);
2259 	if (gt_iir) {
2260 		I915_WRITE(GTIIR, gt_iir);
2261 		ret = IRQ_HANDLED;
2262 		if (INTEL_GEN(dev_priv) >= 6)
2263 			snb_gt_irq_handler(dev_priv, gt_iir);
2264 		else
2265 			ilk_gt_irq_handler(dev_priv, gt_iir);
2266 	}
2267 
2268 	de_iir = I915_READ(DEIIR);
2269 	if (de_iir) {
2270 		I915_WRITE(DEIIR, de_iir);
2271 		ret = IRQ_HANDLED;
2272 		if (INTEL_GEN(dev_priv) >= 7)
2273 			ivb_display_irq_handler(dev_priv, de_iir);
2274 		else
2275 			ilk_display_irq_handler(dev_priv, de_iir);
2276 	}
2277 
2278 	if (INTEL_GEN(dev_priv) >= 6) {
2279 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2280 		if (pm_iir) {
2281 			I915_WRITE(GEN6_PMIIR, pm_iir);
2282 			ret = IRQ_HANDLED;
2283 			gen6_rps_irq_handler(dev_priv, pm_iir);
2284 		}
2285 	}
2286 
2287 	I915_WRITE(DEIER, de_ier);
2288 	POSTING_READ(DEIER);
2289 	if (!HAS_PCH_NOP(dev_priv)) {
2290 		I915_WRITE(SDEIER, sde_ier);
2291 		POSTING_READ(SDEIER);
2292 	}
2293 
2294 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2295 	enable_rpm_wakeref_asserts(dev_priv);
2296 
2297 	return ret;
2298 }
2299 
2300 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2301 				u32 hotplug_trigger,
2302 				const u32 hpd[HPD_NUM_PINS])
2303 {
2304 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2305 
2306 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2307 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2308 
2309 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2310 			   dig_hotplug_reg, hpd,
2311 			   bxt_port_hotplug_long_detect);
2312 
2313 	intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2314 }
2315 
2316 static irqreturn_t
2317 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2318 {
2319 	irqreturn_t ret = IRQ_NONE;
2320 	u32 iir;
2321 	enum i915_pipe pipe;
2322 
2323 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2324 		iir = I915_READ(GEN8_DE_MISC_IIR);
2325 		if (iir) {
2326 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2327 			ret = IRQ_HANDLED;
2328 			if (iir & GEN8_DE_MISC_GSE)
2329 				intel_opregion_asle_intr(dev_priv);
2330 			else
2331 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2332 		}
2333 		else
2334 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2335 	}
2336 
2337 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2338 		iir = I915_READ(GEN8_DE_PORT_IIR);
2339 		if (iir) {
2340 			u32 tmp_mask;
2341 			bool found = false;
2342 
2343 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2344 			ret = IRQ_HANDLED;
2345 
2346 			tmp_mask = GEN8_AUX_CHANNEL_A;
2347 			if (INTEL_INFO(dev_priv)->gen >= 9)
2348 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2349 					    GEN9_AUX_CHANNEL_C |
2350 					    GEN9_AUX_CHANNEL_D;
2351 
2352 			if (iir & tmp_mask) {
2353 				dp_aux_irq_handler(dev_priv);
2354 				found = true;
2355 			}
2356 
2357 			if (IS_BROXTON(dev_priv)) {
2358 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2359 				if (tmp_mask) {
2360 					bxt_hpd_irq_handler(dev_priv, tmp_mask,
2361 							    hpd_bxt);
2362 					found = true;
2363 				}
2364 			} else if (IS_BROADWELL(dev_priv)) {
2365 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2366 				if (tmp_mask) {
2367 					ilk_hpd_irq_handler(dev_priv,
2368 							    tmp_mask, hpd_bdw);
2369 					found = true;
2370 				}
2371 			}
2372 
2373 			if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2374 				gmbus_irq_handler(dev_priv);
2375 				found = true;
2376 			}
2377 
2378 			if (!found)
2379 				DRM_ERROR("Unexpected DE Port interrupt\n");
2380 		}
2381 		else
2382 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2383 	}
2384 
2385 	for_each_pipe(dev_priv, pipe) {
2386 		u32 flip_done, fault_errors;
2387 
2388 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2389 			continue;
2390 
2391 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2392 		if (!iir) {
2393 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2394 			continue;
2395 		}
2396 
2397 		ret = IRQ_HANDLED;
2398 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2399 
2400 		if (iir & GEN8_PIPE_VBLANK &&
2401 		    intel_pipe_handle_vblank(dev_priv, pipe))
2402 			intel_check_page_flip(dev_priv, pipe);
2403 
2404 		flip_done = iir;
2405 		if (INTEL_INFO(dev_priv)->gen >= 9)
2406 			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2407 		else
2408 			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2409 
2410 		if (flip_done)
2411 			intel_finish_page_flip_cs(dev_priv, pipe);
2412 
2413 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2414 			hsw_pipe_crc_irq_handler(dev_priv, pipe);
2415 
2416 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2417 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2418 
2419 		fault_errors = iir;
2420 		if (INTEL_INFO(dev_priv)->gen >= 9)
2421 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2422 		else
2423 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2424 
2425 		if (fault_errors)
2426 			DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2427 				  pipe_name(pipe),
2428 				  fault_errors);
2429 	}
2430 
2431 	if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2432 	    master_ctl & GEN8_DE_PCH_IRQ) {
2433 		/*
2434 		 * FIXME(BDW): Assume for now that the new interrupt handling
2435 		 * scheme also closed the SDE interrupt handling race we've seen
2436 		 * on older pch-split platforms. But this needs testing.
2437 		 */
2438 		iir = I915_READ(SDEIIR);
2439 		if (iir) {
2440 			I915_WRITE(SDEIIR, iir);
2441 			ret = IRQ_HANDLED;
2442 
2443 			if (HAS_PCH_SPT(dev_priv))
2444 				spt_irq_handler(dev_priv, iir);
2445 			else
2446 				cpt_irq_handler(dev_priv, iir);
2447 		} else {
2448 			/*
2449 			 * Like on previous PCH there seems to be something
2450 			 * fishy going on with forwarding PCH interrupts.
2451 			 */
2452 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2453 		}
2454 	}
2455 
2456 	return ret;
2457 }
2458 
2459 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2460 {
2461 	struct drm_device *dev = arg;
2462 	struct drm_i915_private *dev_priv = dev->dev_private;
2463 	u32 master_ctl;
2464 	u32 gt_iir[4] = {};
2465 	irqreturn_t ret;
2466 
2467 	if (!intel_irqs_enabled(dev_priv))
2468 		return IRQ_NONE;
2469 
2470 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2471 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2472 	if (!master_ctl)
2473 		return IRQ_NONE;
2474 
2475 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2476 
2477 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2478 	disable_rpm_wakeref_asserts(dev_priv);
2479 
2480 	/* Find, clear, then process each source of interrupt */
2481 	ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2482 	gen8_gt_irq_handler(dev_priv, gt_iir);
2483 	ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2484 
2485 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2486 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2487 
2488 	enable_rpm_wakeref_asserts(dev_priv);
2489 
2490 	return ret;
2491 }
2492 
2493 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2494 {
2495 	/*
2496 	 * Notify all waiters for GPU completion events that reset state has
2497 	 * been changed, and that they need to restart their wait after
2498 	 * checking for potential errors (and bail out to drop locks if there is
2499 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2500 	 */
2501 
2502 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2503 	wake_up_all(&dev_priv->gpu_error.wait_queue);
2504 
2505 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2506 	wake_up_all(&dev_priv->pending_flip_queue);
2507 }
2508 
2509 /**
2510  * i915_reset_and_wakeup - do process context error handling work
2511  * @dev_priv: i915 device private
2512  *
2513  * Fire an error uevent so userspace can see that a hang or error
2514  * was detected.
2515  */
2516 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2517 {
2518 	struct kobject *kobj = &dev_priv->dev->primary->kdev->kobj;
2519 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2520 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2521 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2522 	int ret;
2523 
2524 	kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2525 
2526 	/*
2527 	 * Note that there's only one work item which does gpu resets, so we
2528 	 * need not worry about concurrent gpu resets potentially incrementing
2529 	 * error->reset_counter twice. We only need to take care of another
2530 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2531 	 * quick check for that is good enough: schedule_work ensures the
2532 	 * correct ordering between hang detection and this work item, and since
2533 	 * the reset in-progress bit is only ever set by code outside of this
2534 	 * work we don't need to worry about any other races.
2535 	 */
2536 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2537 		DRM_DEBUG_DRIVER("resetting chip\n");
2538 		kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2539 
2540 		/*
2541 		 * In most cases it's guaranteed that we get here with an RPM
2542 		 * reference held, for example because there is a pending GPU
2543 		 * request that won't finish until the reset is done. This
2544 		 * isn't the case at least when we get here by doing a
2545 		 * simulated reset via debugs, so get an RPM reference.
2546 		 */
2547 		intel_runtime_pm_get(dev_priv);
2548 
2549 		intel_prepare_reset(dev_priv);
2550 
2551 		/*
2552 		 * All state reset _must_ be completed before we update the
2553 		 * reset counter, for otherwise waiters might miss the reset
2554 		 * pending state and not properly drop locks, resulting in
2555 		 * deadlocks with the reset work.
2556 		 */
2557 		ret = i915_reset(dev_priv);
2558 
2559 		intel_finish_reset(dev_priv);
2560 
2561 		intel_runtime_pm_put(dev_priv);
2562 
2563 		if (ret == 0)
2564 			kobject_uevent_env(kobj,
2565 					   KOBJ_CHANGE, reset_done_event);
2566 
2567 		/*
2568 		 * Note: The wake_up also serves as a memory barrier so that
2569 		 * waiters see the update value of the reset counter atomic_t.
2570 		 */
2571 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2572 	}
2573 }
2574 
2575 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2576 {
2577 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2578 	u32 eir = I915_READ(EIR);
2579 	int pipe, i;
2580 
2581 	if (!eir)
2582 		return;
2583 
2584 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2585 
2586 	i915_get_extra_instdone(dev_priv, instdone);
2587 
2588 	if (IS_G4X(dev_priv)) {
2589 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2590 			u32 ipeir = I915_READ(IPEIR_I965);
2591 
2592 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2593 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2594 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2595 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2596 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2597 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2598 			I915_WRITE(IPEIR_I965, ipeir);
2599 			POSTING_READ(IPEIR_I965);
2600 		}
2601 		if (eir & GM45_ERROR_PAGE_TABLE) {
2602 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2603 			pr_err("page table error\n");
2604 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2605 			I915_WRITE(PGTBL_ER, pgtbl_err);
2606 			POSTING_READ(PGTBL_ER);
2607 		}
2608 	}
2609 
2610 	if (!IS_GEN2(dev_priv)) {
2611 		if (eir & I915_ERROR_PAGE_TABLE) {
2612 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2613 			pr_err("page table error\n");
2614 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2615 			I915_WRITE(PGTBL_ER, pgtbl_err);
2616 			POSTING_READ(PGTBL_ER);
2617 		}
2618 	}
2619 
2620 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2621 		pr_err("memory refresh error:\n");
2622 		for_each_pipe(dev_priv, pipe)
2623 			pr_err("pipe %c stat: 0x%08x\n",
2624 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2625 		/* pipestat has already been acked */
2626 	}
2627 	if (eir & I915_ERROR_INSTRUCTION) {
2628 		pr_err("instruction error\n");
2629 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2630 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2631 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2632 		if (INTEL_GEN(dev_priv) < 4) {
2633 			u32 ipeir = I915_READ(IPEIR);
2634 
2635 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2636 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2637 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2638 			I915_WRITE(IPEIR, ipeir);
2639 			POSTING_READ(IPEIR);
2640 		} else {
2641 			u32 ipeir = I915_READ(IPEIR_I965);
2642 
2643 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2644 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2645 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2646 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2647 			I915_WRITE(IPEIR_I965, ipeir);
2648 			POSTING_READ(IPEIR_I965);
2649 		}
2650 	}
2651 
2652 	I915_WRITE(EIR, eir);
2653 	POSTING_READ(EIR);
2654 	eir = I915_READ(EIR);
2655 	if (eir) {
2656 		/*
2657 		 * some errors might have become stuck,
2658 		 * mask them.
2659 		 */
2660 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2661 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2662 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2663 	}
2664 }
2665 
2666 /**
2667  * i915_handle_error - handle a gpu error
2668  * @dev_priv: i915 device private
2669  * @engine_mask: mask representing engines that are hung
2670  * Do some basic checking of register state at error time and
2671  * dump it to the syslog.  Also call i915_capture_error_state() to make
2672  * sure we get a record and make it available in debugfs.  Fire a uevent
2673  * so userspace knows something bad happened (should trigger collection
2674  * of a ring dump etc.).
2675  * @fmt: Error message format string
2676  */
2677 void i915_handle_error(struct drm_i915_private *dev_priv,
2678 		       u32 engine_mask,
2679 		       const char *fmt, ...)
2680 {
2681 	va_list args;
2682 	char error_msg[80];
2683 
2684 	va_start(args, fmt);
2685 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2686 	va_end(args);
2687 
2688 	i915_capture_error_state(dev_priv, engine_mask, error_msg);
2689 	i915_report_and_clear_eir(dev_priv);
2690 
2691 	if (engine_mask) {
2692 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2693 				&dev_priv->gpu_error.reset_counter);
2694 
2695 		/*
2696 		 * Wakeup waiting processes so that the reset function
2697 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2698 		 * various locks. By bumping the reset counter first, the woken
2699 		 * processes will see a reset in progress and back off,
2700 		 * releasing their locks and then wait for the reset completion.
2701 		 * We must do this for _all_ gpu waiters that might hold locks
2702 		 * that the reset work needs to acquire.
2703 		 *
2704 		 * Note: The wake_up serves as the required memory barrier to
2705 		 * ensure that the waiters see the updated value of the reset
2706 		 * counter atomic_t.
2707 		 */
2708 		i915_error_wake_up(dev_priv);
2709 	}
2710 
2711 	i915_reset_and_wakeup(dev_priv);
2712 }
2713 
2714 /* Called from drm generic code, passed 'crtc' which
2715  * we use as a pipe index
2716  */
2717 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2718 {
2719 	struct drm_i915_private *dev_priv = dev->dev_private;
2720 	unsigned long irqflags;
2721 
2722 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2723 	if (INTEL_INFO(dev)->gen >= 4)
2724 		i915_enable_pipestat(dev_priv, pipe,
2725 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2726 	else
2727 		i915_enable_pipestat(dev_priv, pipe,
2728 				     PIPE_VBLANK_INTERRUPT_STATUS);
2729 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2730 
2731 	return 0;
2732 }
2733 
2734 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2735 {
2736 	struct drm_i915_private *dev_priv = dev->dev_private;
2737 	unsigned long irqflags;
2738 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2739 						     DE_PIPE_VBLANK(pipe);
2740 
2741 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2742 	ilk_enable_display_irq(dev_priv, bit);
2743 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2744 
2745 	return 0;
2746 }
2747 
2748 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2749 {
2750 	struct drm_i915_private *dev_priv = dev->dev_private;
2751 	unsigned long irqflags;
2752 
2753 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2754 	i915_enable_pipestat(dev_priv, pipe,
2755 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2756 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2757 
2758 	return 0;
2759 }
2760 
2761 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2762 {
2763 	struct drm_i915_private *dev_priv = dev->dev_private;
2764 	unsigned long irqflags;
2765 
2766 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2767 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2768 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2769 
2770 	return 0;
2771 }
2772 
2773 /* Called from drm generic code, passed 'crtc' which
2774  * we use as a pipe index
2775  */
2776 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2777 {
2778 	struct drm_i915_private *dev_priv = dev->dev_private;
2779 	unsigned long irqflags;
2780 
2781 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2782 	i915_disable_pipestat(dev_priv, pipe,
2783 			      PIPE_VBLANK_INTERRUPT_STATUS |
2784 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2785 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2786 }
2787 
2788 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2789 {
2790 	struct drm_i915_private *dev_priv = dev->dev_private;
2791 	unsigned long irqflags;
2792 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2793 						     DE_PIPE_VBLANK(pipe);
2794 
2795 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2796 	ilk_disable_display_irq(dev_priv, bit);
2797 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2798 }
2799 
2800 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2801 {
2802 	struct drm_i915_private *dev_priv = dev->dev_private;
2803 	unsigned long irqflags;
2804 
2805 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2806 	i915_disable_pipestat(dev_priv, pipe,
2807 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2808 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2809 }
2810 
2811 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2812 {
2813 	struct drm_i915_private *dev_priv = dev->dev_private;
2814 	unsigned long irqflags;
2815 
2816 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2817 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2818 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2819 }
2820 
2821 static bool
2822 ring_idle(struct intel_engine_cs *engine, u32 seqno)
2823 {
2824 	return i915_seqno_passed(seqno,
2825 				 READ_ONCE(engine->last_submitted_seqno));
2826 }
2827 
2828 static bool
2829 ipehr_is_semaphore_wait(struct drm_i915_private *dev_priv, u32 ipehr)
2830 {
2831 	if (INTEL_GEN(dev_priv) >= 8) {
2832 		return (ipehr >> 23) == 0x1c;
2833 	} else {
2834 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2835 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2836 				 MI_SEMAPHORE_REGISTER);
2837 	}
2838 }
2839 
2840 static struct intel_engine_cs *
2841 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2842 				 u64 offset)
2843 {
2844 	struct drm_i915_private *dev_priv = engine->i915;
2845 	struct intel_engine_cs *signaller;
2846 
2847 	if (INTEL_GEN(dev_priv) >= 8) {
2848 		for_each_engine(signaller, dev_priv) {
2849 			if (engine == signaller)
2850 				continue;
2851 
2852 			if (offset == signaller->semaphore.signal_ggtt[engine->id])
2853 				return signaller;
2854 		}
2855 	} else {
2856 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2857 
2858 		for_each_engine(signaller, dev_priv) {
2859 			if(engine == signaller)
2860 				continue;
2861 
2862 			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2863 				return signaller;
2864 		}
2865 	}
2866 
2867 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2868 		  engine->id, ipehr, offset);
2869 
2870 	return NULL;
2871 }
2872 
2873 static struct intel_engine_cs *
2874 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2875 {
2876 	struct drm_i915_private *dev_priv = engine->i915;
2877 	u32 cmd, ipehr, head;
2878 	u64 offset = 0;
2879 	int i, backwards;
2880 
2881 	/*
2882 	 * This function does not support execlist mode - any attempt to
2883 	 * proceed further into this function will result in a kernel panic
2884 	 * when dereferencing ring->buffer, which is not set up in execlist
2885 	 * mode.
2886 	 *
2887 	 * The correct way of doing it would be to derive the currently
2888 	 * executing ring buffer from the current context, which is derived
2889 	 * from the currently running request. Unfortunately, to get the
2890 	 * current request we would have to grab the struct_mutex before doing
2891 	 * anything else, which would be ill-advised since some other thread
2892 	 * might have grabbed it already and managed to hang itself, causing
2893 	 * the hang checker to deadlock.
2894 	 *
2895 	 * Therefore, this function does not support execlist mode in its
2896 	 * current form. Just return NULL and move on.
2897 	 */
2898 	if (engine->buffer == NULL)
2899 		return NULL;
2900 
2901 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2902 	if (!ipehr_is_semaphore_wait(engine->i915, ipehr))
2903 		return NULL;
2904 
2905 	/*
2906 	 * HEAD is likely pointing to the dword after the actual command,
2907 	 * so scan backwards until we find the MBOX. But limit it to just 3
2908 	 * or 4 dwords depending on the semaphore wait command size.
2909 	 * Note that we don't care about ACTHD here since that might
2910 	 * point at at batch, and semaphores are always emitted into the
2911 	 * ringbuffer itself.
2912 	 */
2913 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
2914 	backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2915 
2916 	for (i = backwards; i; --i) {
2917 		/*
2918 		 * Be paranoid and presume the hw has gone off into the wild -
2919 		 * our ring is smaller than what the hardware (and hence
2920 		 * HEAD_ADDR) allows. Also handles wrap-around.
2921 		 */
2922 		head &= engine->buffer->size - 1;
2923 
2924 		/* This here seems to blow up */
2925 		cmd = ioread32(engine->buffer->virtual_start + head);
2926 		if (cmd == ipehr)
2927 			break;
2928 
2929 		head -= 4;
2930 	}
2931 
2932 	if (!i)
2933 		return NULL;
2934 
2935 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2936 	if (INTEL_GEN(dev_priv) >= 8) {
2937 		offset = ioread32(engine->buffer->virtual_start + head + 12);
2938 		offset <<= 32;
2939 		offset = ioread32(engine->buffer->virtual_start + head + 8);
2940 	}
2941 	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2942 }
2943 
2944 static int semaphore_passed(struct intel_engine_cs *engine)
2945 {
2946 	struct drm_i915_private *dev_priv = engine->i915;
2947 	struct intel_engine_cs *signaller;
2948 	u32 seqno;
2949 
2950 	engine->hangcheck.deadlock++;
2951 
2952 	signaller = semaphore_waits_for(engine, &seqno);
2953 	if (signaller == NULL)
2954 		return -1;
2955 
2956 	/* Prevent pathological recursion due to driver bugs */
2957 	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2958 		return -1;
2959 
2960 	if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
2961 		return 1;
2962 
2963 	/* cursory check for an unkickable deadlock */
2964 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2965 	    semaphore_passed(signaller) < 0)
2966 		return -1;
2967 
2968 	return 0;
2969 }
2970 
2971 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2972 {
2973 	struct intel_engine_cs *engine;
2974 
2975 	for_each_engine(engine, dev_priv)
2976 		engine->hangcheck.deadlock = 0;
2977 }
2978 
2979 static bool subunits_stuck(struct intel_engine_cs *engine)
2980 {
2981 	u32 instdone[I915_NUM_INSTDONE_REG];
2982 	bool stuck;
2983 	int i;
2984 
2985 	if (engine->id != RCS)
2986 		return true;
2987 
2988 	i915_get_extra_instdone(engine->i915, instdone);
2989 
2990 	/* There might be unstable subunit states even when
2991 	 * actual head is not moving. Filter out the unstable ones by
2992 	 * accumulating the undone -> done transitions and only
2993 	 * consider those as progress.
2994 	 */
2995 	stuck = true;
2996 	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2997 		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
2998 
2999 		if (tmp != engine->hangcheck.instdone[i])
3000 			stuck = false;
3001 
3002 		engine->hangcheck.instdone[i] |= tmp;
3003 	}
3004 
3005 	return stuck;
3006 }
3007 
3008 static enum intel_ring_hangcheck_action
3009 head_stuck(struct intel_engine_cs *engine, u64 acthd)
3010 {
3011 	if (acthd != engine->hangcheck.acthd) {
3012 
3013 		/* Clear subunit states on head movement */
3014 		memset(engine->hangcheck.instdone, 0,
3015 		       sizeof(engine->hangcheck.instdone));
3016 
3017 		return HANGCHECK_ACTIVE;
3018 	}
3019 
3020 	if (!subunits_stuck(engine))
3021 		return HANGCHECK_ACTIVE;
3022 
3023 	return HANGCHECK_HUNG;
3024 }
3025 
3026 static enum intel_ring_hangcheck_action
3027 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3028 {
3029 	struct drm_i915_private *dev_priv = engine->i915;
3030 	enum intel_ring_hangcheck_action ha;
3031 	u32 tmp;
3032 
3033 	ha = head_stuck(engine, acthd);
3034 	if (ha != HANGCHECK_HUNG)
3035 		return ha;
3036 
3037 	if (IS_GEN2(dev_priv))
3038 		return HANGCHECK_HUNG;
3039 
3040 	/* Is the chip hanging on a WAIT_FOR_EVENT?
3041 	 * If so we can simply poke the RB_WAIT bit
3042 	 * and break the hang. This should work on
3043 	 * all but the second generation chipsets.
3044 	 */
3045 	tmp = I915_READ_CTL(engine);
3046 	if (tmp & RING_WAIT) {
3047 		i915_handle_error(dev_priv, 0,
3048 				  "Kicking stuck wait on %s",
3049 				  engine->name);
3050 		I915_WRITE_CTL(engine, tmp);
3051 		return HANGCHECK_KICK;
3052 	}
3053 
3054 	if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3055 		switch (semaphore_passed(engine)) {
3056 		default:
3057 			return HANGCHECK_HUNG;
3058 		case 1:
3059 			i915_handle_error(dev_priv, 0,
3060 					  "Kicking stuck semaphore on %s",
3061 					  engine->name);
3062 			I915_WRITE_CTL(engine, tmp);
3063 			return HANGCHECK_KICK;
3064 		case 0:
3065 			return HANGCHECK_WAIT;
3066 		}
3067 	}
3068 
3069 	return HANGCHECK_HUNG;
3070 }
3071 
3072 static unsigned kick_waiters(struct intel_engine_cs *engine)
3073 {
3074 	struct drm_i915_private *i915 = engine->i915;
3075 	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3076 
3077 	if (engine->hangcheck.user_interrupts == user_interrupts &&
3078 	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3079 		if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
3080 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3081 				  engine->name);
3082 		else
3083 			DRM_INFO("Fake missed irq on %s\n",
3084 				 engine->name);
3085 		wake_up_all(&engine->irq_queue);
3086 	}
3087 
3088 	return user_interrupts;
3089 }
3090 /*
3091  * This is called when the chip hasn't reported back with completed
3092  * batchbuffers in a long time. We keep track per ring seqno progress and
3093  * if there are no progress, hangcheck score for that ring is increased.
3094  * Further, acthd is inspected to see if the ring is stuck. On stuck case
3095  * we kick the ring. If we see no progress on three subsequent calls
3096  * we assume chip is wedged and try to fix it by resetting the chip.
3097  */
3098 static void i915_hangcheck_elapsed(struct work_struct *work)
3099 {
3100 	struct drm_i915_private *dev_priv =
3101 		container_of(work, typeof(*dev_priv),
3102 			     gpu_error.hangcheck_work.work);
3103 	struct intel_engine_cs *engine;
3104 	enum intel_engine_id id;
3105 	int busy_count = 0, rings_hung = 0;
3106 	bool stuck[I915_NUM_ENGINES] = { 0 };
3107 #define BUSY 1
3108 #define KICK 5
3109 #define HUNG 20
3110 #define ACTIVE_DECAY 15
3111 
3112 	if (!i915.enable_hangcheck)
3113 		return;
3114 
3115 	/*
3116 	 * The hangcheck work is synced during runtime suspend, we don't
3117 	 * require a wakeref. TODO: instead of disabling the asserts make
3118 	 * sure that we hold a reference when this work is running.
3119 	 */
3120 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3121 
3122 	/* As enabling the GPU requires fairly extensive mmio access,
3123 	 * periodically arm the mmio checker to see if we are triggering
3124 	 * any invalid access.
3125 	 */
3126 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3127 
3128 	for_each_engine_id(engine, dev_priv, id) {
3129 		bool busy = waitqueue_active(&engine->irq_queue);
3130 		u64 acthd;
3131 		u32 seqno;
3132 		unsigned user_interrupts;
3133 
3134 		semaphore_clear_deadlocks(dev_priv);
3135 
3136 		/* We don't strictly need an irq-barrier here, as we are not
3137 		 * serving an interrupt request, be paranoid in case the
3138 		 * barrier has side-effects (such as preventing a broken
3139 		 * cacheline snoop) and so be sure that we can see the seqno
3140 		 * advance. If the seqno should stick, due to a stale
3141 		 * cacheline, we would erroneously declare the GPU hung.
3142 		 */
3143 		if (engine->irq_seqno_barrier)
3144 			engine->irq_seqno_barrier(engine);
3145 
3146 		acthd = intel_ring_get_active_head(engine);
3147 		seqno = engine->get_seqno(engine);
3148 
3149 		/* Reset stuck interrupts between batch advances */
3150 		user_interrupts = 0;
3151 
3152 		if (engine->hangcheck.seqno == seqno) {
3153 			if (ring_idle(engine, seqno)) {
3154 				engine->hangcheck.action = HANGCHECK_IDLE;
3155 				if (busy) {
3156 					/* Safeguard against driver failure */
3157 					user_interrupts = kick_waiters(engine);
3158 					engine->hangcheck.score += BUSY;
3159 				}
3160 			} else {
3161 				/* We always increment the hangcheck score
3162 				 * if the ring is busy and still processing
3163 				 * the same request, so that no single request
3164 				 * can run indefinitely (such as a chain of
3165 				 * batches). The only time we do not increment
3166 				 * the hangcheck score on this ring, if this
3167 				 * ring is in a legitimate wait for another
3168 				 * ring. In that case the waiting ring is a
3169 				 * victim and we want to be sure we catch the
3170 				 * right culprit. Then every time we do kick
3171 				 * the ring, add a small increment to the
3172 				 * score so that we can catch a batch that is
3173 				 * being repeatedly kicked and so responsible
3174 				 * for stalling the machine.
3175 				 */
3176 				engine->hangcheck.action = ring_stuck(engine,
3177 								      acthd);
3178 
3179 				switch (engine->hangcheck.action) {
3180 				case HANGCHECK_IDLE:
3181 				case HANGCHECK_WAIT:
3182 					break;
3183 				case HANGCHECK_ACTIVE:
3184 					engine->hangcheck.score += BUSY;
3185 					break;
3186 				case HANGCHECK_KICK:
3187 					engine->hangcheck.score += KICK;
3188 					break;
3189 				case HANGCHECK_HUNG:
3190 					engine->hangcheck.score += HUNG;
3191 					stuck[id] = true;
3192 					break;
3193 				}
3194 			}
3195 		} else {
3196 			engine->hangcheck.action = HANGCHECK_ACTIVE;
3197 
3198 			/* Gradually reduce the count so that we catch DoS
3199 			 * attempts across multiple batches.
3200 			 */
3201 			if (engine->hangcheck.score > 0)
3202 				engine->hangcheck.score -= ACTIVE_DECAY;
3203 			if (engine->hangcheck.score < 0)
3204 				engine->hangcheck.score = 0;
3205 
3206 			/* Clear head and subunit states on seqno movement */
3207 			acthd = 0;
3208 
3209 			memset(engine->hangcheck.instdone, 0,
3210 			       sizeof(engine->hangcheck.instdone));
3211 		}
3212 
3213 		engine->hangcheck.seqno = seqno;
3214 		engine->hangcheck.acthd = acthd;
3215 		engine->hangcheck.user_interrupts = user_interrupts;
3216 		busy_count += busy;
3217 	}
3218 
3219 	for_each_engine_id(engine, dev_priv, id) {
3220 		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3221 			DRM_INFO("%s on %s\n",
3222 				 stuck[id] ? "stuck" : "no progress",
3223 				 engine->name);
3224 			rings_hung |= intel_engine_flag(engine);
3225 		}
3226 	}
3227 
3228 	if (rings_hung) {
3229 		i915_handle_error(dev_priv, rings_hung, "Engine(s) hung");
3230 		goto out;
3231 	}
3232 
3233 	/* Reset timer in case GPU hangs without another request being added */
3234 	if (busy_count)
3235 		i915_queue_hangcheck(dev_priv);
3236 
3237 out:
3238 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3239 }
3240 
3241 static void ibx_irq_reset(struct drm_device *dev)
3242 {
3243 	struct drm_i915_private *dev_priv = dev->dev_private;
3244 
3245 	if (HAS_PCH_NOP(dev))
3246 		return;
3247 
3248 	GEN5_IRQ_RESET(SDE);
3249 
3250 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3251 		I915_WRITE(SERR_INT, 0xffffffff);
3252 }
3253 
3254 /*
3255  * SDEIER is also touched by the interrupt handler to work around missed PCH
3256  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3257  * instead we unconditionally enable all PCH interrupt sources here, but then
3258  * only unmask them as needed with SDEIMR.
3259  *
3260  * This function needs to be called before interrupts are enabled.
3261  */
3262 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3263 {
3264 	struct drm_i915_private *dev_priv = dev->dev_private;
3265 
3266 	if (HAS_PCH_NOP(dev))
3267 		return;
3268 
3269 	WARN_ON(I915_READ(SDEIER) != 0);
3270 	I915_WRITE(SDEIER, 0xffffffff);
3271 	POSTING_READ(SDEIER);
3272 }
3273 
3274 static void gen5_gt_irq_reset(struct drm_device *dev)
3275 {
3276 	struct drm_i915_private *dev_priv = dev->dev_private;
3277 
3278 	GEN5_IRQ_RESET(GT);
3279 	if (INTEL_INFO(dev)->gen >= 6)
3280 		GEN5_IRQ_RESET(GEN6_PM);
3281 }
3282 
3283 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3284 {
3285 	enum i915_pipe pipe;
3286 
3287 	if (IS_CHERRYVIEW(dev_priv))
3288 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3289 	else
3290 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3291 
3292 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3293 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3294 
3295 	for_each_pipe(dev_priv, pipe) {
3296 		I915_WRITE(PIPESTAT(pipe),
3297 			   PIPE_FIFO_UNDERRUN_STATUS |
3298 			   PIPESTAT_INT_STATUS_MASK);
3299 		dev_priv->pipestat_irq_mask[pipe] = 0;
3300 	}
3301 
3302 	GEN5_IRQ_RESET(VLV_);
3303 	dev_priv->irq_mask = ~0;
3304 }
3305 
3306 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3307 {
3308 	u32 pipestat_mask;
3309 	u32 enable_mask;
3310 	enum i915_pipe pipe;
3311 
3312 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3313 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3314 
3315 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3316 	for_each_pipe(dev_priv, pipe)
3317 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3318 
3319 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3320 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3321 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3322 	if (IS_CHERRYVIEW(dev_priv))
3323 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3324 
3325 	WARN_ON(dev_priv->irq_mask != ~0);
3326 
3327 	dev_priv->irq_mask = ~enable_mask;
3328 
3329 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3330 }
3331 
3332 /* drm_dma.h hooks
3333 */
3334 static void ironlake_irq_reset(struct drm_device *dev)
3335 {
3336 	struct drm_i915_private *dev_priv = dev->dev_private;
3337 
3338 	I915_WRITE(HWSTAM, 0xffffffff);
3339 
3340 	GEN5_IRQ_RESET(DE);
3341 	if (IS_GEN7(dev))
3342 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3343 
3344 	gen5_gt_irq_reset(dev);
3345 
3346 	ibx_irq_reset(dev);
3347 }
3348 
3349 static void valleyview_irq_preinstall(struct drm_device *dev)
3350 {
3351 	struct drm_i915_private *dev_priv = dev->dev_private;
3352 
3353 	I915_WRITE(VLV_MASTER_IER, 0);
3354 	POSTING_READ(VLV_MASTER_IER);
3355 
3356 	gen5_gt_irq_reset(dev);
3357 
3358 	spin_lock_irq(&dev_priv->irq_lock);
3359 	if (dev_priv->display_irqs_enabled)
3360 		vlv_display_irq_reset(dev_priv);
3361 	spin_unlock_irq(&dev_priv->irq_lock);
3362 }
3363 
3364 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3365 {
3366 	GEN8_IRQ_RESET_NDX(GT, 0);
3367 	GEN8_IRQ_RESET_NDX(GT, 1);
3368 	GEN8_IRQ_RESET_NDX(GT, 2);
3369 	GEN8_IRQ_RESET_NDX(GT, 3);
3370 }
3371 
3372 static void gen8_irq_reset(struct drm_device *dev)
3373 {
3374 	struct drm_i915_private *dev_priv = dev->dev_private;
3375 	int pipe;
3376 
3377 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3378 	POSTING_READ(GEN8_MASTER_IRQ);
3379 
3380 	gen8_gt_irq_reset(dev_priv);
3381 
3382 	for_each_pipe(dev_priv, pipe)
3383 		if (intel_display_power_is_enabled(dev_priv,
3384 						   POWER_DOMAIN_PIPE(pipe)))
3385 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3386 
3387 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3388 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3389 	GEN5_IRQ_RESET(GEN8_PCU_);
3390 
3391 	if (HAS_PCH_SPLIT(dev))
3392 		ibx_irq_reset(dev);
3393 }
3394 
3395 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3396 				     unsigned int pipe_mask)
3397 {
3398 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3399 	enum i915_pipe pipe;
3400 
3401 	spin_lock_irq(&dev_priv->irq_lock);
3402 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3403 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3404 				  dev_priv->de_irq_mask[pipe],
3405 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3406 	spin_unlock_irq(&dev_priv->irq_lock);
3407 }
3408 
3409 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3410 				     unsigned int pipe_mask)
3411 {
3412 	enum i915_pipe pipe;
3413 
3414 	spin_lock_irq(&dev_priv->irq_lock);
3415 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3416 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3417 	spin_unlock_irq(&dev_priv->irq_lock);
3418 
3419 	/* make sure we're done processing display irqs */
3420 	synchronize_irq(dev_priv->dev->irq);
3421 }
3422 
3423 static void cherryview_irq_preinstall(struct drm_device *dev)
3424 {
3425 	struct drm_i915_private *dev_priv = dev->dev_private;
3426 
3427 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3428 	POSTING_READ(GEN8_MASTER_IRQ);
3429 
3430 	gen8_gt_irq_reset(dev_priv);
3431 
3432 	GEN5_IRQ_RESET(GEN8_PCU_);
3433 
3434 	spin_lock_irq(&dev_priv->irq_lock);
3435 	if (dev_priv->display_irqs_enabled)
3436 		vlv_display_irq_reset(dev_priv);
3437 	spin_unlock_irq(&dev_priv->irq_lock);
3438 }
3439 
3440 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3441 				  const u32 hpd[HPD_NUM_PINS])
3442 {
3443 	struct intel_encoder *encoder;
3444 	u32 enabled_irqs = 0;
3445 
3446 	for_each_intel_encoder(dev_priv->dev, encoder)
3447 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3448 			enabled_irqs |= hpd[encoder->hpd_pin];
3449 
3450 	return enabled_irqs;
3451 }
3452 
3453 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3454 {
3455 	u32 hotplug_irqs, hotplug, enabled_irqs;
3456 
3457 	if (HAS_PCH_IBX(dev_priv)) {
3458 		hotplug_irqs = SDE_HOTPLUG_MASK;
3459 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3460 	} else {
3461 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3462 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3463 	}
3464 
3465 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3466 
3467 	/*
3468 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3469 	 * duration to 2ms (which is the minimum in the Display Port spec).
3470 	 * The pulse duration bits are reserved on LPT+.
3471 	 */
3472 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3473 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3474 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3475 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3476 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3477 	/*
3478 	 * When CPU and PCH are on the same package, port A
3479 	 * HPD must be enabled in both north and south.
3480 	 */
3481 	if (HAS_PCH_LPT_LP(dev_priv))
3482 		hotplug |= PORTA_HOTPLUG_ENABLE;
3483 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3484 }
3485 
3486 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3487 {
3488 	u32 hotplug_irqs, hotplug, enabled_irqs;
3489 
3490 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3491 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3492 
3493 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3494 
3495 	/* Enable digital hotplug on the PCH */
3496 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3497 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3498 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3499 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3500 
3501 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3502 	hotplug |= PORTE_HOTPLUG_ENABLE;
3503 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3504 }
3505 
3506 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3507 {
3508 	u32 hotplug_irqs, hotplug, enabled_irqs;
3509 
3510 	if (INTEL_GEN(dev_priv) >= 8) {
3511 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3512 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3513 
3514 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3515 	} else if (INTEL_GEN(dev_priv) >= 7) {
3516 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3517 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3518 
3519 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3520 	} else {
3521 		hotplug_irqs = DE_DP_A_HOTPLUG;
3522 		enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3523 
3524 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3525 	}
3526 
3527 	/*
3528 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3529 	 * duration to 2ms (which is the minimum in the Display Port spec)
3530 	 * The pulse duration bits are reserved on HSW+.
3531 	 */
3532 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3533 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3534 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3535 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3536 
3537 	ibx_hpd_irq_setup(dev_priv);
3538 }
3539 
3540 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3541 {
3542 	u32 hotplug_irqs, hotplug, enabled_irqs;
3543 
3544 	enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3545 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3546 
3547 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3548 
3549 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3550 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3551 		PORTA_HOTPLUG_ENABLE;
3552 
3553 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3554 		      hotplug, enabled_irqs);
3555 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3556 
3557 	/*
3558 	 * For BXT invert bit has to be set based on AOB design
3559 	 * for HPD detection logic, update it based on VBT fields.
3560 	 */
3561 
3562 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3563 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3564 		hotplug |= BXT_DDIA_HPD_INVERT;
3565 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3566 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3567 		hotplug |= BXT_DDIB_HPD_INVERT;
3568 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3569 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3570 		hotplug |= BXT_DDIC_HPD_INVERT;
3571 
3572 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3573 }
3574 
3575 static void ibx_irq_postinstall(struct drm_device *dev)
3576 {
3577 	struct drm_i915_private *dev_priv = dev->dev_private;
3578 	u32 mask;
3579 
3580 	if (HAS_PCH_NOP(dev))
3581 		return;
3582 
3583 	if (HAS_PCH_IBX(dev))
3584 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3585 	else
3586 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3587 
3588 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3589 	I915_WRITE(SDEIMR, ~mask);
3590 }
3591 
3592 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3593 {
3594 	struct drm_i915_private *dev_priv = dev->dev_private;
3595 	u32 pm_irqs, gt_irqs;
3596 
3597 	pm_irqs = gt_irqs = 0;
3598 
3599 	dev_priv->gt_irq_mask = ~0;
3600 	if (HAS_L3_DPF(dev)) {
3601 		/* L3 parity interrupt is always unmasked. */
3602 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3603 		gt_irqs |= GT_PARITY_ERROR(dev);
3604 	}
3605 
3606 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3607 	if (IS_GEN5(dev)) {
3608 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3609 			   ILK_BSD_USER_INTERRUPT;
3610 	} else {
3611 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3612 	}
3613 
3614 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3615 
3616 	if (INTEL_INFO(dev)->gen >= 6) {
3617 		/*
3618 		 * RPS interrupts will get enabled/disabled on demand when RPS
3619 		 * itself is enabled/disabled.
3620 		 */
3621 		if (HAS_VEBOX(dev))
3622 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3623 
3624 		dev_priv->pm_irq_mask = 0xffffffff;
3625 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3626 	}
3627 }
3628 
3629 static int ironlake_irq_postinstall(struct drm_device *dev)
3630 {
3631 	struct drm_i915_private *dev_priv = dev->dev_private;
3632 	u32 display_mask, extra_mask;
3633 
3634 	if (INTEL_INFO(dev)->gen >= 7) {
3635 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3636 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3637 				DE_PLANEB_FLIP_DONE_IVB |
3638 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3639 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3640 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3641 			      DE_DP_A_HOTPLUG_IVB);
3642 	} else {
3643 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3644 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3645 				DE_AUX_CHANNEL_A |
3646 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3647 				DE_POISON);
3648 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3649 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3650 			      DE_DP_A_HOTPLUG);
3651 	}
3652 
3653 	dev_priv->irq_mask = ~display_mask;
3654 
3655 	I915_WRITE(HWSTAM, 0xeffe);
3656 
3657 	ibx_irq_pre_postinstall(dev);
3658 
3659 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3660 
3661 	gen5_gt_irq_postinstall(dev);
3662 
3663 	ibx_irq_postinstall(dev);
3664 
3665 	if (IS_IRONLAKE_M(dev)) {
3666 		/* Enable PCU event interrupts
3667 		 *
3668 		 * spinlocking not required here for correctness since interrupt
3669 		 * setup is guaranteed to run in single-threaded context. But we
3670 		 * need it to make the assert_spin_locked happy. */
3671 		spin_lock_irq(&dev_priv->irq_lock);
3672 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3673 		spin_unlock_irq(&dev_priv->irq_lock);
3674 	}
3675 
3676 	return 0;
3677 }
3678 
3679 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3680 {
3681 	assert_spin_locked(&dev_priv->irq_lock);
3682 
3683 	if (dev_priv->display_irqs_enabled)
3684 		return;
3685 
3686 	dev_priv->display_irqs_enabled = true;
3687 
3688 	if (intel_irqs_enabled(dev_priv)) {
3689 		vlv_display_irq_reset(dev_priv);
3690 		vlv_display_irq_postinstall(dev_priv);
3691 	}
3692 }
3693 
3694 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3695 {
3696 	assert_spin_locked(&dev_priv->irq_lock);
3697 
3698 	if (!dev_priv->display_irqs_enabled)
3699 		return;
3700 
3701 	dev_priv->display_irqs_enabled = false;
3702 
3703 	if (intel_irqs_enabled(dev_priv))
3704 		vlv_display_irq_reset(dev_priv);
3705 }
3706 
3707 
3708 static int valleyview_irq_postinstall(struct drm_device *dev)
3709 {
3710 	struct drm_i915_private *dev_priv = dev->dev_private;
3711 
3712 	gen5_gt_irq_postinstall(dev);
3713 
3714 	spin_lock_irq(&dev_priv->irq_lock);
3715 	if (dev_priv->display_irqs_enabled)
3716 		vlv_display_irq_postinstall(dev_priv);
3717 	spin_unlock_irq(&dev_priv->irq_lock);
3718 
3719 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3720 	POSTING_READ(VLV_MASTER_IER);
3721 
3722 	return 0;
3723 }
3724 
3725 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3726 {
3727 	/* These are interrupts we'll toggle with the ring mask register */
3728 	uint32_t gt_interrupts[] = {
3729 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3730 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3731 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3732 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3733 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3734 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3735 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3736 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3737 		0,
3738 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3739 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3740 		};
3741 
3742 	if (HAS_L3_DPF(dev_priv))
3743 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3744 
3745 	dev_priv->pm_irq_mask = 0xffffffff;
3746 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3747 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3748 	/*
3749 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3750 	 * is enabled/disabled.
3751 	 */
3752 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3753 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3754 }
3755 
3756 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3757 {
3758 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3759 	uint32_t de_pipe_enables;
3760 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3761 	u32 de_port_enables;
3762 	u32 de_misc_masked = GEN8_DE_MISC_GSE;
3763 	enum i915_pipe pipe;
3764 
3765 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3766 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3767 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3768 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3769 				  GEN9_AUX_CHANNEL_D;
3770 		if (IS_BROXTON(dev_priv))
3771 			de_port_masked |= BXT_DE_PORT_GMBUS;
3772 	} else {
3773 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3774 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3775 	}
3776 
3777 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3778 					   GEN8_PIPE_FIFO_UNDERRUN;
3779 
3780 	de_port_enables = de_port_masked;
3781 	if (IS_BROXTON(dev_priv))
3782 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3783 	else if (IS_BROADWELL(dev_priv))
3784 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3785 
3786 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3787 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3788 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3789 
3790 	for_each_pipe(dev_priv, pipe)
3791 		if (intel_display_power_is_enabled(dev_priv,
3792 				POWER_DOMAIN_PIPE(pipe)))
3793 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3794 					  dev_priv->de_irq_mask[pipe],
3795 					  de_pipe_enables);
3796 
3797 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3798 	GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3799 }
3800 
3801 static int gen8_irq_postinstall(struct drm_device *dev)
3802 {
3803 	struct drm_i915_private *dev_priv = dev->dev_private;
3804 
3805 	if (HAS_PCH_SPLIT(dev))
3806 		ibx_irq_pre_postinstall(dev);
3807 
3808 	gen8_gt_irq_postinstall(dev_priv);
3809 	gen8_de_irq_postinstall(dev_priv);
3810 
3811 	if (HAS_PCH_SPLIT(dev))
3812 		ibx_irq_postinstall(dev);
3813 
3814 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3815 	POSTING_READ(GEN8_MASTER_IRQ);
3816 
3817 	return 0;
3818 }
3819 
3820 static int cherryview_irq_postinstall(struct drm_device *dev)
3821 {
3822 	struct drm_i915_private *dev_priv = dev->dev_private;
3823 
3824 	gen8_gt_irq_postinstall(dev_priv);
3825 
3826 	spin_lock_irq(&dev_priv->irq_lock);
3827 	if (dev_priv->display_irqs_enabled)
3828 		vlv_display_irq_postinstall(dev_priv);
3829 	spin_unlock_irq(&dev_priv->irq_lock);
3830 
3831 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3832 	POSTING_READ(GEN8_MASTER_IRQ);
3833 
3834 	return 0;
3835 }
3836 
3837 static void gen8_irq_uninstall(struct drm_device *dev)
3838 {
3839 	struct drm_i915_private *dev_priv = dev->dev_private;
3840 
3841 	if (!dev_priv)
3842 		return;
3843 
3844 	gen8_irq_reset(dev);
3845 }
3846 
3847 static void valleyview_irq_uninstall(struct drm_device *dev)
3848 {
3849 	struct drm_i915_private *dev_priv = dev->dev_private;
3850 
3851 	if (!dev_priv)
3852 		return;
3853 
3854 	I915_WRITE(VLV_MASTER_IER, 0);
3855 	POSTING_READ(VLV_MASTER_IER);
3856 
3857 	gen5_gt_irq_reset(dev);
3858 
3859 	I915_WRITE(HWSTAM, 0xffffffff);
3860 
3861 	spin_lock_irq(&dev_priv->irq_lock);
3862 	if (dev_priv->display_irqs_enabled)
3863 		vlv_display_irq_reset(dev_priv);
3864 	spin_unlock_irq(&dev_priv->irq_lock);
3865 }
3866 
3867 static void cherryview_irq_uninstall(struct drm_device *dev)
3868 {
3869 	struct drm_i915_private *dev_priv = dev->dev_private;
3870 
3871 	if (!dev_priv)
3872 		return;
3873 
3874 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3875 	POSTING_READ(GEN8_MASTER_IRQ);
3876 
3877 	gen8_gt_irq_reset(dev_priv);
3878 
3879 	GEN5_IRQ_RESET(GEN8_PCU_);
3880 
3881 	spin_lock_irq(&dev_priv->irq_lock);
3882 	if (dev_priv->display_irqs_enabled)
3883 		vlv_display_irq_reset(dev_priv);
3884 	spin_unlock_irq(&dev_priv->irq_lock);
3885 }
3886 
3887 static void ironlake_irq_uninstall(struct drm_device *dev)
3888 {
3889 	struct drm_i915_private *dev_priv = dev->dev_private;
3890 
3891 	if (!dev_priv)
3892 		return;
3893 
3894 	ironlake_irq_reset(dev);
3895 }
3896 
3897 static void i8xx_irq_preinstall(struct drm_device * dev)
3898 {
3899 	struct drm_i915_private *dev_priv = dev->dev_private;
3900 	int pipe;
3901 
3902 	for_each_pipe(dev_priv, pipe)
3903 		I915_WRITE(PIPESTAT(pipe), 0);
3904 	I915_WRITE16(IMR, 0xffff);
3905 	I915_WRITE16(IER, 0x0);
3906 	POSTING_READ16(IER);
3907 }
3908 
3909 static int i8xx_irq_postinstall(struct drm_device *dev)
3910 {
3911 	struct drm_i915_private *dev_priv = dev->dev_private;
3912 
3913 	I915_WRITE16(EMR,
3914 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3915 
3916 	/* Unmask the interrupts that we always want on. */
3917 	dev_priv->irq_mask =
3918 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3919 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3920 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3921 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3922 	I915_WRITE16(IMR, dev_priv->irq_mask);
3923 
3924 	I915_WRITE16(IER,
3925 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3926 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3927 		     I915_USER_INTERRUPT);
3928 	POSTING_READ16(IER);
3929 
3930 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3931 	 * just to make the assert_spin_locked check happy. */
3932 	spin_lock_irq(&dev_priv->irq_lock);
3933 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3934 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3935 	spin_unlock_irq(&dev_priv->irq_lock);
3936 
3937 	return 0;
3938 }
3939 
3940 /*
3941  * Returns true when a page flip has completed.
3942  */
3943 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3944 			       int plane, int pipe, u32 iir)
3945 {
3946 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3947 
3948 	if (!intel_pipe_handle_vblank(dev_priv, pipe))
3949 		return false;
3950 
3951 	if ((iir & flip_pending) == 0)
3952 		goto check_page_flip;
3953 
3954 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3955 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3956 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3957 	 * the flip is completed (no longer pending). Since this doesn't raise
3958 	 * an interrupt per se, we watch for the change at vblank.
3959 	 */
3960 	if (I915_READ16(ISR) & flip_pending)
3961 		goto check_page_flip;
3962 
3963 	intel_finish_page_flip_cs(dev_priv, pipe);
3964 	return true;
3965 
3966 check_page_flip:
3967 	intel_check_page_flip(dev_priv, pipe);
3968 	return false;
3969 }
3970 
3971 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3972 {
3973 	struct drm_device *dev = arg;
3974 	struct drm_i915_private *dev_priv = dev->dev_private;
3975 	u16 iir, new_iir;
3976 	u32 pipe_stats[2];
3977 	int pipe;
3978 	u16 flip_mask =
3979 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3980 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3981 	irqreturn_t ret;
3982 
3983 	if (!intel_irqs_enabled(dev_priv))
3984 		return IRQ_NONE;
3985 
3986 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3987 	disable_rpm_wakeref_asserts(dev_priv);
3988 
3989 	ret = IRQ_NONE;
3990 	iir = I915_READ16(IIR);
3991 	if (iir == 0)
3992 		goto out;
3993 
3994 	while (iir & ~flip_mask) {
3995 		/* Can't rely on pipestat interrupt bit in iir as it might
3996 		 * have been cleared after the pipestat interrupt was received.
3997 		 * It doesn't set the bit in iir again, but it still produces
3998 		 * interrupts (for non-MSI).
3999 		 */
4000 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4001 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4002 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4003 
4004 		for_each_pipe(dev_priv, pipe) {
4005 			i915_reg_t reg = PIPESTAT(pipe);
4006 			pipe_stats[pipe] = I915_READ(reg);
4007 
4008 			/*
4009 			 * Clear the PIPE*STAT regs before the IIR
4010 			 */
4011 			if (pipe_stats[pipe] & 0x8000ffff)
4012 				I915_WRITE(reg, pipe_stats[pipe]);
4013 		}
4014 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4015 
4016 		I915_WRITE16(IIR, iir & ~flip_mask);
4017 		new_iir = I915_READ16(IIR); /* Flush posted writes */
4018 
4019 		if (iir & I915_USER_INTERRUPT)
4020 			notify_ring(&dev_priv->engine[RCS]);
4021 
4022 		for_each_pipe(dev_priv, pipe) {
4023 			int plane = pipe;
4024 			if (HAS_FBC(dev_priv))
4025 				plane = !plane;
4026 
4027 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4028 			    i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4029 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4030 
4031 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4032 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4033 
4034 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4035 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4036 								    pipe);
4037 		}
4038 
4039 		iir = new_iir;
4040 	}
4041 	ret = IRQ_HANDLED;
4042 
4043 out:
4044 	enable_rpm_wakeref_asserts(dev_priv);
4045 
4046 	return ret;
4047 }
4048 
4049 static void i8xx_irq_uninstall(struct drm_device * dev)
4050 {
4051 	struct drm_i915_private *dev_priv = dev->dev_private;
4052 	int pipe;
4053 
4054 	for_each_pipe(dev_priv, pipe) {
4055 		/* Clear enable bits; then clear status bits */
4056 		I915_WRITE(PIPESTAT(pipe), 0);
4057 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4058 	}
4059 	I915_WRITE16(IMR, 0xffff);
4060 	I915_WRITE16(IER, 0x0);
4061 	I915_WRITE16(IIR, I915_READ16(IIR));
4062 }
4063 
4064 static void i915_irq_preinstall(struct drm_device * dev)
4065 {
4066 	struct drm_i915_private *dev_priv = dev->dev_private;
4067 	int pipe;
4068 
4069 	if (I915_HAS_HOTPLUG(dev)) {
4070 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4071 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4072 	}
4073 
4074 	I915_WRITE16(HWSTAM, 0xeffe);
4075 	for_each_pipe(dev_priv, pipe)
4076 		I915_WRITE(PIPESTAT(pipe), 0);
4077 	I915_WRITE(IMR, 0xffffffff);
4078 	I915_WRITE(IER, 0x0);
4079 	POSTING_READ(IER);
4080 }
4081 
4082 static int i915_irq_postinstall(struct drm_device *dev)
4083 {
4084 	struct drm_i915_private *dev_priv = dev->dev_private;
4085 	u32 enable_mask;
4086 
4087 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4088 
4089 	/* Unmask the interrupts that we always want on. */
4090 	dev_priv->irq_mask =
4091 		~(I915_ASLE_INTERRUPT |
4092 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4093 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4094 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4095 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4096 
4097 	enable_mask =
4098 		I915_ASLE_INTERRUPT |
4099 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4100 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4101 		I915_USER_INTERRUPT;
4102 
4103 	if (I915_HAS_HOTPLUG(dev)) {
4104 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4105 		POSTING_READ(PORT_HOTPLUG_EN);
4106 
4107 		/* Enable in IER... */
4108 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4109 		/* and unmask in IMR */
4110 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4111 	}
4112 
4113 	I915_WRITE(IMR, dev_priv->irq_mask);
4114 	I915_WRITE(IER, enable_mask);
4115 	POSTING_READ(IER);
4116 
4117 	i915_enable_asle_pipestat(dev_priv);
4118 
4119 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4120 	 * just to make the assert_spin_locked check happy. */
4121 	spin_lock_irq(&dev_priv->irq_lock);
4122 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4123 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4124 	spin_unlock_irq(&dev_priv->irq_lock);
4125 
4126 	return 0;
4127 }
4128 
4129 /*
4130  * Returns true when a page flip has completed.
4131  */
4132 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4133 			       int plane, int pipe, u32 iir)
4134 {
4135 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4136 
4137 	if (!intel_pipe_handle_vblank(dev_priv, pipe))
4138 		return false;
4139 
4140 	if ((iir & flip_pending) == 0)
4141 		goto check_page_flip;
4142 
4143 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4144 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4145 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4146 	 * the flip is completed (no longer pending). Since this doesn't raise
4147 	 * an interrupt per se, we watch for the change at vblank.
4148 	 */
4149 	if (I915_READ(ISR) & flip_pending)
4150 		goto check_page_flip;
4151 
4152 	intel_finish_page_flip_cs(dev_priv, pipe);
4153 	return true;
4154 
4155 check_page_flip:
4156 	intel_check_page_flip(dev_priv, pipe);
4157 	return false;
4158 }
4159 
4160 static irqreturn_t i915_irq_handler(int irq, void *arg)
4161 {
4162 	struct drm_device *dev = arg;
4163 	struct drm_i915_private *dev_priv = dev->dev_private;
4164 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4165 	u32 flip_mask =
4166 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4167 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4168 	int pipe, ret = IRQ_NONE;
4169 
4170 	if (!intel_irqs_enabled(dev_priv))
4171 		return IRQ_NONE;
4172 
4173 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4174 	disable_rpm_wakeref_asserts(dev_priv);
4175 
4176 	iir = I915_READ(IIR);
4177 	do {
4178 		bool irq_received = (iir & ~flip_mask) != 0;
4179 		bool blc_event = false;
4180 
4181 		/* Can't rely on pipestat interrupt bit in iir as it might
4182 		 * have been cleared after the pipestat interrupt was received.
4183 		 * It doesn't set the bit in iir again, but it still produces
4184 		 * interrupts (for non-MSI).
4185 		 */
4186 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4187 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4188 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4189 
4190 		for_each_pipe(dev_priv, pipe) {
4191 			i915_reg_t reg = PIPESTAT(pipe);
4192 			pipe_stats[pipe] = I915_READ(reg);
4193 
4194 			/* Clear the PIPE*STAT regs before the IIR */
4195 			if (pipe_stats[pipe] & 0x8000ffff) {
4196 				I915_WRITE(reg, pipe_stats[pipe]);
4197 				irq_received = true;
4198 			}
4199 		}
4200 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4201 
4202 		if (!irq_received)
4203 			break;
4204 
4205 		/* Consume port.  Then clear IIR or we'll miss events */
4206 		if (I915_HAS_HOTPLUG(dev_priv) &&
4207 		    iir & I915_DISPLAY_PORT_INTERRUPT) {
4208 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4209 			if (hotplug_status)
4210 				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4211 		}
4212 
4213 		I915_WRITE(IIR, iir & ~flip_mask);
4214 		new_iir = I915_READ(IIR); /* Flush posted writes */
4215 
4216 		if (iir & I915_USER_INTERRUPT)
4217 			notify_ring(&dev_priv->engine[RCS]);
4218 
4219 		for_each_pipe(dev_priv, pipe) {
4220 			int plane = pipe;
4221 			if (HAS_FBC(dev_priv))
4222 				plane = !plane;
4223 
4224 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4225 			    i915_handle_vblank(dev_priv, plane, pipe, iir))
4226 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4227 
4228 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4229 				blc_event = true;
4230 
4231 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4232 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4233 
4234 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4235 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4236 								    pipe);
4237 		}
4238 
4239 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4240 			intel_opregion_asle_intr(dev_priv);
4241 
4242 		/* With MSI, interrupts are only generated when iir
4243 		 * transitions from zero to nonzero.  If another bit got
4244 		 * set while we were handling the existing iir bits, then
4245 		 * we would never get another interrupt.
4246 		 *
4247 		 * This is fine on non-MSI as well, as if we hit this path
4248 		 * we avoid exiting the interrupt handler only to generate
4249 		 * another one.
4250 		 *
4251 		 * Note that for MSI this could cause a stray interrupt report
4252 		 * if an interrupt landed in the time between writing IIR and
4253 		 * the posting read.  This should be rare enough to never
4254 		 * trigger the 99% of 100,000 interrupts test for disabling
4255 		 * stray interrupts.
4256 		 */
4257 		ret = IRQ_HANDLED;
4258 		iir = new_iir;
4259 	} while (iir & ~flip_mask);
4260 
4261 	enable_rpm_wakeref_asserts(dev_priv);
4262 
4263 	return ret;
4264 }
4265 
4266 static void i915_irq_uninstall(struct drm_device * dev)
4267 {
4268 	struct drm_i915_private *dev_priv = dev->dev_private;
4269 	int pipe;
4270 
4271 	if (I915_HAS_HOTPLUG(dev)) {
4272 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4273 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4274 	}
4275 
4276 	I915_WRITE16(HWSTAM, 0xffff);
4277 	for_each_pipe(dev_priv, pipe) {
4278 		/* Clear enable bits; then clear status bits */
4279 		I915_WRITE(PIPESTAT(pipe), 0);
4280 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4281 	}
4282 	I915_WRITE(IMR, 0xffffffff);
4283 	I915_WRITE(IER, 0x0);
4284 
4285 	I915_WRITE(IIR, I915_READ(IIR));
4286 }
4287 
4288 static void i965_irq_preinstall(struct drm_device * dev)
4289 {
4290 	struct drm_i915_private *dev_priv = dev->dev_private;
4291 	int pipe;
4292 
4293 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4294 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4295 
4296 	I915_WRITE(HWSTAM, 0xeffe);
4297 	for_each_pipe(dev_priv, pipe)
4298 		I915_WRITE(PIPESTAT(pipe), 0);
4299 	I915_WRITE(IMR, 0xffffffff);
4300 	I915_WRITE(IER, 0x0);
4301 	POSTING_READ(IER);
4302 }
4303 
4304 static int i965_irq_postinstall(struct drm_device *dev)
4305 {
4306 	struct drm_i915_private *dev_priv = dev->dev_private;
4307 	u32 enable_mask;
4308 	u32 error_mask;
4309 
4310 	/* Unmask the interrupts that we always want on. */
4311 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4312 			       I915_DISPLAY_PORT_INTERRUPT |
4313 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4314 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4315 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4316 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4317 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4318 
4319 	enable_mask = ~dev_priv->irq_mask;
4320 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4321 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4322 	enable_mask |= I915_USER_INTERRUPT;
4323 
4324 	if (IS_G4X(dev_priv))
4325 		enable_mask |= I915_BSD_USER_INTERRUPT;
4326 
4327 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4328 	 * just to make the assert_spin_locked check happy. */
4329 	spin_lock_irq(&dev_priv->irq_lock);
4330 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4331 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4332 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4333 	spin_unlock_irq(&dev_priv->irq_lock);
4334 
4335 	/*
4336 	 * Enable some error detection, note the instruction error mask
4337 	 * bit is reserved, so we leave it masked.
4338 	 */
4339 	if (IS_G4X(dev_priv)) {
4340 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4341 			       GM45_ERROR_MEM_PRIV |
4342 			       GM45_ERROR_CP_PRIV |
4343 			       I915_ERROR_MEMORY_REFRESH);
4344 	} else {
4345 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4346 			       I915_ERROR_MEMORY_REFRESH);
4347 	}
4348 	I915_WRITE(EMR, error_mask);
4349 
4350 	I915_WRITE(IMR, dev_priv->irq_mask);
4351 	I915_WRITE(IER, enable_mask);
4352 	POSTING_READ(IER);
4353 
4354 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4355 	POSTING_READ(PORT_HOTPLUG_EN);
4356 
4357 	i915_enable_asle_pipestat(dev_priv);
4358 
4359 	return 0;
4360 }
4361 
4362 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4363 {
4364 	u32 hotplug_en;
4365 
4366 	assert_spin_locked(&dev_priv->irq_lock);
4367 
4368 	/* Note HDMI and DP share hotplug bits */
4369 	/* enable bits are the same for all generations */
4370 	hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4371 	/* Programming the CRT detection parameters tends
4372 	   to generate a spurious hotplug event about three
4373 	   seconds later.  So just do it once.
4374 	*/
4375 	if (IS_G4X(dev_priv))
4376 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4377 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4378 
4379 	/* Ignore TV since it's buggy */
4380 	i915_hotplug_interrupt_update_locked(dev_priv,
4381 					     HOTPLUG_INT_EN_MASK |
4382 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4383 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4384 					     hotplug_en);
4385 }
4386 
4387 static irqreturn_t i965_irq_handler(int irq, void *arg)
4388 {
4389 	struct drm_device *dev = arg;
4390 	struct drm_i915_private *dev_priv = dev->dev_private;
4391 	u32 iir, new_iir;
4392 	u32 pipe_stats[I915_MAX_PIPES];
4393 	int ret = IRQ_NONE, pipe;
4394 	u32 flip_mask =
4395 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4396 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4397 
4398 	if (!intel_irqs_enabled(dev_priv))
4399 		return IRQ_NONE;
4400 
4401 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4402 	disable_rpm_wakeref_asserts(dev_priv);
4403 
4404 	iir = I915_READ(IIR);
4405 
4406 	for (;;) {
4407 		bool irq_received = (iir & ~flip_mask) != 0;
4408 		bool blc_event = false;
4409 
4410 		/* Can't rely on pipestat interrupt bit in iir as it might
4411 		 * have been cleared after the pipestat interrupt was received.
4412 		 * It doesn't set the bit in iir again, but it still produces
4413 		 * interrupts (for non-MSI).
4414 		 */
4415 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4416 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4417 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4418 
4419 		for_each_pipe(dev_priv, pipe) {
4420 			i915_reg_t reg = PIPESTAT(pipe);
4421 			pipe_stats[pipe] = I915_READ(reg);
4422 
4423 			/*
4424 			 * Clear the PIPE*STAT regs before the IIR
4425 			 */
4426 			if (pipe_stats[pipe] & 0x8000ffff) {
4427 				I915_WRITE(reg, pipe_stats[pipe]);
4428 				irq_received = true;
4429 			}
4430 		}
4431 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4432 
4433 		if (!irq_received)
4434 			break;
4435 
4436 		ret = IRQ_HANDLED;
4437 
4438 		/* Consume port.  Then clear IIR or we'll miss events */
4439 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4440 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4441 			if (hotplug_status)
4442 				i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4443 		}
4444 
4445 		I915_WRITE(IIR, iir & ~flip_mask);
4446 		new_iir = I915_READ(IIR); /* Flush posted writes */
4447 
4448 		if (iir & I915_USER_INTERRUPT)
4449 			notify_ring(&dev_priv->engine[RCS]);
4450 		if (iir & I915_BSD_USER_INTERRUPT)
4451 			notify_ring(&dev_priv->engine[VCS]);
4452 
4453 		for_each_pipe(dev_priv, pipe) {
4454 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4455 			    i915_handle_vblank(dev_priv, pipe, pipe, iir))
4456 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4457 
4458 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4459 				blc_event = true;
4460 
4461 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4462 				i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4463 
4464 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4465 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4466 		}
4467 
4468 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4469 			intel_opregion_asle_intr(dev_priv);
4470 
4471 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4472 			gmbus_irq_handler(dev_priv);
4473 
4474 		/* With MSI, interrupts are only generated when iir
4475 		 * transitions from zero to nonzero.  If another bit got
4476 		 * set while we were handling the existing iir bits, then
4477 		 * we would never get another interrupt.
4478 		 *
4479 		 * This is fine on non-MSI as well, as if we hit this path
4480 		 * we avoid exiting the interrupt handler only to generate
4481 		 * another one.
4482 		 *
4483 		 * Note that for MSI this could cause a stray interrupt report
4484 		 * if an interrupt landed in the time between writing IIR and
4485 		 * the posting read.  This should be rare enough to never
4486 		 * trigger the 99% of 100,000 interrupts test for disabling
4487 		 * stray interrupts.
4488 		 */
4489 		iir = new_iir;
4490 	}
4491 
4492 	enable_rpm_wakeref_asserts(dev_priv);
4493 
4494 	return ret;
4495 }
4496 
4497 static void i965_irq_uninstall(struct drm_device * dev)
4498 {
4499 	struct drm_i915_private *dev_priv = dev->dev_private;
4500 	int pipe;
4501 
4502 	if (!dev_priv)
4503 		return;
4504 
4505 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4506 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4507 
4508 	I915_WRITE(HWSTAM, 0xffffffff);
4509 	for_each_pipe(dev_priv, pipe)
4510 		I915_WRITE(PIPESTAT(pipe), 0);
4511 	I915_WRITE(IMR, 0xffffffff);
4512 	I915_WRITE(IER, 0x0);
4513 
4514 	for_each_pipe(dev_priv, pipe)
4515 		I915_WRITE(PIPESTAT(pipe),
4516 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4517 	I915_WRITE(IIR, I915_READ(IIR));
4518 }
4519 
4520 /**
4521  * intel_irq_init - initializes irq support
4522  * @dev_priv: i915 device instance
4523  *
4524  * This function initializes all the irq support including work items, timers
4525  * and all the vtables. It does not setup the interrupt itself though.
4526  */
4527 void intel_irq_init(struct drm_i915_private *dev_priv)
4528 {
4529 	struct drm_device *dev = dev_priv->dev;
4530 
4531 	intel_hpd_init_work(dev_priv);
4532 
4533 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4534 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4535 
4536 	/* Let's track the enabled rps events */
4537 	if (IS_VALLEYVIEW(dev_priv))
4538 		/* WaGsvRC0ResidencyMethod:vlv */
4539 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4540 	else
4541 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4542 
4543 	dev_priv->rps.pm_intr_keep = 0;
4544 
4545 	/*
4546 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
4547 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
4548 	 *
4549 	 * TODO: verify if this can be reproduced on VLV,CHV.
4550 	 */
4551 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4552 		dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4553 
4554 	if (INTEL_INFO(dev_priv)->gen >= 8)
4555 		dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4556 
4557 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4558 			  i915_hangcheck_elapsed);
4559 
4560 	if (IS_GEN2(dev_priv)) {
4561 		dev->max_vblank_count = 0;
4562 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4563 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4564 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4565 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4566 	} else {
4567 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4568 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4569 	}
4570 
4571 	/*
4572 	 * Opt out of the vblank disable timer on everything except gen2.
4573 	 * Gen2 doesn't have a hardware frame counter and so depends on
4574 	 * vblank interrupts to produce sane vblank seuquence numbers.
4575 	 */
4576 	if (!IS_GEN2(dev_priv))
4577 		dev->vblank_disable_immediate = true;
4578 
4579 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4580 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4581 
4582 	if (IS_CHERRYVIEW(dev_priv)) {
4583 		dev->driver->irq_handler = cherryview_irq_handler;
4584 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4585 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4586 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4587 		dev->driver->enable_vblank = valleyview_enable_vblank;
4588 		dev->driver->disable_vblank = valleyview_disable_vblank;
4589 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4590 	} else if (IS_VALLEYVIEW(dev_priv)) {
4591 		dev->driver->irq_handler = valleyview_irq_handler;
4592 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4593 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4594 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4595 		dev->driver->enable_vblank = valleyview_enable_vblank;
4596 		dev->driver->disable_vblank = valleyview_disable_vblank;
4597 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4598 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4599 		dev->driver->irq_handler = gen8_irq_handler;
4600 		dev->driver->irq_preinstall = gen8_irq_reset;
4601 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4602 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4603 		dev->driver->enable_vblank = gen8_enable_vblank;
4604 		dev->driver->disable_vblank = gen8_disable_vblank;
4605 		if (IS_BROXTON(dev))
4606 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4607 		else if (HAS_PCH_SPT(dev))
4608 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4609 		else
4610 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4611 	} else if (HAS_PCH_SPLIT(dev)) {
4612 		dev->driver->irq_handler = ironlake_irq_handler;
4613 		dev->driver->irq_preinstall = ironlake_irq_reset;
4614 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4615 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4616 		dev->driver->enable_vblank = ironlake_enable_vblank;
4617 		dev->driver->disable_vblank = ironlake_disable_vblank;
4618 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4619 	} else {
4620 		if (IS_GEN2(dev_priv)) {
4621 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4622 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4623 			dev->driver->irq_handler = i8xx_irq_handler;
4624 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4625 		} else if (IS_GEN3(dev_priv)) {
4626 			dev->driver->irq_preinstall = i915_irq_preinstall;
4627 			dev->driver->irq_postinstall = i915_irq_postinstall;
4628 			dev->driver->irq_uninstall = i915_irq_uninstall;
4629 			dev->driver->irq_handler = i915_irq_handler;
4630 		} else {
4631 			dev->driver->irq_preinstall = i965_irq_preinstall;
4632 			dev->driver->irq_postinstall = i965_irq_postinstall;
4633 			dev->driver->irq_uninstall = i965_irq_uninstall;
4634 			dev->driver->irq_handler = i965_irq_handler;
4635 		}
4636 		if (I915_HAS_HOTPLUG(dev_priv))
4637 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4638 		dev->driver->enable_vblank = i915_enable_vblank;
4639 		dev->driver->disable_vblank = i915_disable_vblank;
4640 	}
4641 }
4642 
4643 /**
4644  * intel_irq_install - enables the hardware interrupt
4645  * @dev_priv: i915 device instance
4646  *
4647  * This function enables the hardware interrupt handling, but leaves the hotplug
4648  * handling still disabled. It is called after intel_irq_init().
4649  *
4650  * In the driver load and resume code we need working interrupts in a few places
4651  * but don't want to deal with the hassle of concurrent probe and hotplug
4652  * workers. Hence the split into this two-stage approach.
4653  */
4654 int intel_irq_install(struct drm_i915_private *dev_priv)
4655 {
4656 	/*
4657 	 * We enable some interrupt sources in our postinstall hooks, so mark
4658 	 * interrupts as enabled _before_ actually enabling them to avoid
4659 	 * special cases in our ordering checks.
4660 	 */
4661 	dev_priv->pm.irqs_enabled = true;
4662 
4663 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4664 }
4665 
4666 /**
4667  * intel_irq_uninstall - finilizes all irq handling
4668  * @dev_priv: i915 device instance
4669  *
4670  * This stops interrupt and hotplug handling and unregisters and frees all
4671  * resources acquired in the init functions.
4672  */
4673 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4674 {
4675 	drm_irq_uninstall(dev_priv->dev);
4676 	intel_hpd_cancel_work(dev_priv);
4677 	dev_priv->pm.irqs_enabled = false;
4678 }
4679 
4680 /**
4681  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4682  * @dev_priv: i915 device instance
4683  *
4684  * This function is used to disable interrupts at runtime, both in the runtime
4685  * pm and the system suspend/resume code.
4686  */
4687 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4688 {
4689 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4690 	dev_priv->pm.irqs_enabled = false;
4691 	synchronize_irq(dev_priv->dev->irq);
4692 }
4693 
4694 /**
4695  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4696  * @dev_priv: i915 device instance
4697  *
4698  * This function is used to enable interrupts at runtime, both in the runtime
4699  * pm and the system suspend/resume code.
4700  */
4701 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4702 {
4703 	dev_priv->pm.irqs_enabled = true;
4704 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4705 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4706 }
4707