xref: /dragonfly/sys/dev/drm/i915/i915_irq.c (revision 62dc643e)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <linux/circ_buf.h>
30 #include <drm/drmP.h>
31 #include <drm/i915_drm.h>
32 #include "i915_drv.h"
33 #include "i915_trace.h"
34 #include "intel_drv.h"
35 
36 /**
37  * DOC: interrupt handling
38  *
39  * These functions provide the basic support for enabling and disabling the
40  * interrupt handling. There's a lot more functionality in i915_irq.c
41  * and related files, but that will be described in separate chapters.
42  */
43 
44 static const u32 hpd_ilk[HPD_NUM_PINS] = {
45 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
46 };
47 
48 static const u32 hpd_ivb[HPD_NUM_PINS] = {
49 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
50 };
51 
52 static const u32 hpd_bdw[HPD_NUM_PINS] = {
53 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
54 };
55 
56 static const u32 hpd_ibx[HPD_NUM_PINS] = {
57 	[HPD_CRT] = SDE_CRT_HOTPLUG,
58 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
59 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
60 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
61 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
62 };
63 
64 static const u32 hpd_cpt[HPD_NUM_PINS] = {
65 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
66 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
67 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
68 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
69 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
70 };
71 
72 static const u32 hpd_spt[HPD_NUM_PINS] = {
73 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
74 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
75 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
76 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
77 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
78 };
79 
80 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
81 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
82 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
83 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
84 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
85 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
86 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
87 };
88 
89 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
90 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
91 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
92 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
93 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
94 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
95 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
96 };
97 
98 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
99 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
100 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
101 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
102 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
103 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
104 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
105 };
106 
107 /* BXT hpd list */
108 static const u32 hpd_bxt[HPD_NUM_PINS] = {
109 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
110 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
111 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
112 };
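/*
 * Each hpd_* table above maps an HPD pin (the HPD_* value used as the array
 * index) to the platform-specific hotplug bit in the corresponding interrupt
 * or hotplug-status register; intel_get_hpd_pins() further down walks such a
 * table to translate raw trigger bits back into a pin mask.
 */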
113 
114 /* IIR can theoretically queue up two events. Be paranoid. */
115 #define GEN8_IRQ_RESET_NDX(type, which) do { \
116 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
117 	POSTING_READ(GEN8_##type##_IMR(which)); \
118 	I915_WRITE(GEN8_##type##_IER(which), 0); \
119 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
120 	POSTING_READ(GEN8_##type##_IIR(which)); \
121 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
122 	POSTING_READ(GEN8_##type##_IIR(which)); \
123 } while (0)
124 
125 #define GEN5_IRQ_RESET(type) do { \
126 	I915_WRITE(type##IMR, 0xffffffff); \
127 	POSTING_READ(type##IMR); \
128 	I915_WRITE(type##IER, 0); \
129 	I915_WRITE(type##IIR, 0xffffffff); \
130 	POSTING_READ(type##IIR); \
131 	I915_WRITE(type##IIR, 0xffffffff); \
132 	POSTING_READ(type##IIR); \
133 } while (0)
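/*
 * As an illustration, GEN5_IRQ_RESET(DE) expands to: mask everything in
 * DEIMR, zero DEIER, then write 0xffffffff to DEIIR twice with a posting
 * read after each write. The double IIR clear is the "be paranoid" part
 * above, since IIR can hold a second queued event behind the visible one.
 */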
134 
135 /*
136  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
137  */
138 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
139 				    i915_reg_t reg)
140 {
141 	u32 val = I915_READ(reg);
142 
143 	if (val == 0)
144 		return;
145 
146 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
147 	     i915_mmio_reg_offset(reg), val);
148 	I915_WRITE(reg, 0xffffffff);
149 	POSTING_READ(reg);
150 	I915_WRITE(reg, 0xffffffff);
151 	POSTING_READ(reg);
152 }
153 
154 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
155 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
156 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
157 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
158 	POSTING_READ(GEN8_##type##_IMR(which)); \
159 } while (0)
160 
161 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
162 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
163 	I915_WRITE(type##IER, (ier_val)); \
164 	I915_WRITE(type##IMR, (imr_val)); \
165 	POSTING_READ(type##IMR); \
166 } while (0)
167 
168 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
169 
170 /* For display hotplug interrupt */
171 static inline void
172 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
173 				     uint32_t mask,
174 				     uint32_t bits)
175 {
176 	uint32_t val;
177 
178 	assert_spin_locked(&dev_priv->irq_lock);
179 	WARN_ON(bits & ~mask);
180 
181 	val = I915_READ(PORT_HOTPLUG_EN);
182 	val &= ~mask;
183 	val |= bits;
184 	I915_WRITE(PORT_HOTPLUG_EN, val);
185 }
186 
187 /**
188  * i915_hotplug_interrupt_update - update hotplug interrupt enable
189  * @dev_priv: driver private
190  * @mask: bits to update
191  * @bits: bits to enable
192  * NOTE: the HPD enable bits are modified both inside and outside
193  * of an interrupt context. To prevent the read-modify-write cycles
194  * from interfering, these bits are protected by a spinlock. Since this
195  * function is usually not called from a context where the lock is
196  * held already, this function acquires the lock itself. A non-locking
197  * version is also available.
198  */
199 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
200 				   uint32_t mask,
201 				   uint32_t bits)
202 {
203 	spin_lock_irq(&dev_priv->irq_lock);
204 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
205 	spin_unlock_irq(&dev_priv->irq_lock);
206 }
207 
208 /**
209  * ilk_update_display_irq - update DEIMR
210  * @dev_priv: driver private
211  * @interrupt_mask: mask of interrupt bits to update
212  * @enabled_irq_mask: mask of interrupt bits to enable
213  */
214 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
215 			    uint32_t interrupt_mask,
216 			    uint32_t enabled_irq_mask)
217 {
218 	uint32_t new_val;
219 
220 	assert_spin_locked(&dev_priv->irq_lock);
221 
222 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
223 
224 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
225 		return;
226 
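	/*
	 * In DEIMR a set bit masks (disables) that interrupt, so the bits we
	 * want enabled end up cleared and the rest of interrupt_mask set.
	 * Illustrative example: interrupt_mask = 0x0c, enabled_irq_mask = 0x04
	 * first clears both bits from the cached mask and then sets 0x08
	 * again, leaving bit 2 unmasked and bit 3 masked.
	 */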
227 	new_val = dev_priv->irq_mask;
228 	new_val &= ~interrupt_mask;
229 	new_val |= (~enabled_irq_mask & interrupt_mask);
230 
231 	if (new_val != dev_priv->irq_mask) {
232 		dev_priv->irq_mask = new_val;
233 		I915_WRITE(DEIMR, dev_priv->irq_mask);
234 		POSTING_READ(DEIMR);
235 	}
236 }
237 
238 /**
239  * ilk_update_gt_irq - update GTIMR
240  * @dev_priv: driver private
241  * @interrupt_mask: mask of interrupt bits to update
242  * @enabled_irq_mask: mask of interrupt bits to enable
243  */
244 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
245 			      uint32_t interrupt_mask,
246 			      uint32_t enabled_irq_mask)
247 {
248 	assert_spin_locked(&dev_priv->irq_lock);
249 
250 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
251 
252 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
253 		return;
254 
255 	dev_priv->gt_irq_mask &= ~interrupt_mask;
256 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
257 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
258 	POSTING_READ(GTIMR);
259 }
260 
261 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
262 {
263 	ilk_update_gt_irq(dev_priv, mask, mask);
264 }
265 
266 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
267 {
268 	ilk_update_gt_irq(dev_priv, mask, 0);
269 }
270 
271 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
272 {
273 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
274 }
275 
276 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
277 {
278 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
279 }
280 
281 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
282 {
283 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
284 }
285 
286 /**
287  * snb_update_pm_irq - update GEN6_PMIMR
288  * @dev_priv: driver private
289  * @interrupt_mask: mask of interrupt bits to update
290  * @enabled_irq_mask: mask of interrupt bits to enable
291  */
292 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
293 			      uint32_t interrupt_mask,
294 			      uint32_t enabled_irq_mask)
295 {
296 	uint32_t new_val;
297 
298 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
299 
300 	assert_spin_locked(&dev_priv->irq_lock);
301 
302 	new_val = dev_priv->pm_irq_mask;
303 	new_val &= ~interrupt_mask;
304 	new_val |= (~enabled_irq_mask & interrupt_mask);
305 
306 	if (new_val != dev_priv->pm_irq_mask) {
307 		dev_priv->pm_irq_mask = new_val;
308 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
309 		POSTING_READ(gen6_pm_imr(dev_priv));
310 	}
311 }
312 
313 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
314 {
315 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
316 		return;
317 
318 	snb_update_pm_irq(dev_priv, mask, mask);
319 }
320 
321 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
322 				  uint32_t mask)
323 {
324 	snb_update_pm_irq(dev_priv, mask, 0);
325 }
326 
327 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
328 {
329 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
330 		return;
331 
332 	__gen6_disable_pm_irq(dev_priv, mask);
333 }
334 
335 void gen6_reset_rps_interrupts(struct drm_device *dev)
336 {
337 	struct drm_i915_private *dev_priv = dev->dev_private;
338 	i915_reg_t reg = gen6_pm_iir(dev_priv);
339 
340 	spin_lock_irq(&dev_priv->irq_lock);
341 	I915_WRITE(reg, dev_priv->pm_rps_events);
342 	I915_WRITE(reg, dev_priv->pm_rps_events);
343 	POSTING_READ(reg);
344 	dev_priv->rps.pm_iir = 0;
345 	spin_unlock_irq(&dev_priv->irq_lock);
346 }
347 
348 void gen6_enable_rps_interrupts(struct drm_device *dev)
349 {
350 	struct drm_i915_private *dev_priv = dev->dev_private;
351 
352 	spin_lock_irq(&dev_priv->irq_lock);
353 
354 	WARN_ON(dev_priv->rps.pm_iir);
355 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
356 	dev_priv->rps.interrupts_enabled = true;
357 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
358 				dev_priv->pm_rps_events);
359 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
360 
361 	spin_unlock_irq(&dev_priv->irq_lock);
362 }
363 
364 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
365 {
366 	/*
367 	 * SNB and IVB can hard hang on a looping batchbuffer if
368 	 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may as well.
369 	 *
370 	 * TODO: verify if this can be reproduced on VLV,CHV.
371 	 */
372 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
373 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
374 
375 	if (INTEL_INFO(dev_priv)->gen >= 8)
376 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
377 
378 	return mask;
379 }
380 
381 void gen6_disable_rps_interrupts(struct drm_device *dev)
382 {
383 	struct drm_i915_private *dev_priv = dev->dev_private;
384 
385 	spin_lock_irq(&dev_priv->irq_lock);
386 	dev_priv->rps.interrupts_enabled = false;
387 	spin_unlock_irq(&dev_priv->irq_lock);
388 
389 	cancel_work_sync(&dev_priv->rps.work);
390 
391 	spin_lock_irq(&dev_priv->irq_lock);
392 
393 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
394 
395 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
396 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
397 				~dev_priv->pm_rps_events);
398 
399 	spin_unlock_irq(&dev_priv->irq_lock);
400 
401 	synchronize_irq(dev->irq);
402 }
403 
404 /**
405  * bdw_update_port_irq - update DE port interrupt
406  * @dev_priv: driver private
407  * @interrupt_mask: mask of interrupt bits to update
408  * @enabled_irq_mask: mask of interrupt bits to enable
409  */
410 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
411 				uint32_t interrupt_mask,
412 				uint32_t enabled_irq_mask)
413 {
414 	uint32_t new_val;
415 	uint32_t old_val;
416 
417 	assert_spin_locked(&dev_priv->irq_lock);
418 
419 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
420 
421 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
422 		return;
423 
424 	old_val = I915_READ(GEN8_DE_PORT_IMR);
425 
426 	new_val = old_val;
427 	new_val &= ~interrupt_mask;
428 	new_val |= (~enabled_irq_mask & interrupt_mask);
429 
430 	if (new_val != old_val) {
431 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
432 		POSTING_READ(GEN8_DE_PORT_IMR);
433 	}
434 }
435 
436 /**
437  * bdw_update_pipe_irq - update DE pipe interrupt
438  * @dev_priv: driver private
439  * @pipe: pipe whose interrupt to update
440  * @interrupt_mask: mask of interrupt bits to update
441  * @enabled_irq_mask: mask of interrupt bits to enable
442  */
443 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
444 			 enum i915_pipe pipe,
445 			 uint32_t interrupt_mask,
446 			 uint32_t enabled_irq_mask)
447 {
448 	uint32_t new_val;
449 
450 	assert_spin_locked(&dev_priv->irq_lock);
451 
452 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
453 
454 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
455 		return;
456 
457 	new_val = dev_priv->de_irq_mask[pipe];
458 	new_val &= ~interrupt_mask;
459 	new_val |= (~enabled_irq_mask & interrupt_mask);
460 
461 	if (new_val != dev_priv->de_irq_mask[pipe]) {
462 		dev_priv->de_irq_mask[pipe] = new_val;
463 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
464 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
465 	}
466 }
467 
468 /**
469  * ibx_display_interrupt_update - update SDEIMR
470  * @dev_priv: driver private
471  * @interrupt_mask: mask of interrupt bits to update
472  * @enabled_irq_mask: mask of interrupt bits to enable
473  */
474 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
475 				  uint32_t interrupt_mask,
476 				  uint32_t enabled_irq_mask)
477 {
478 	uint32_t sdeimr = I915_READ(SDEIMR);
479 	sdeimr &= ~interrupt_mask;
480 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
481 
482 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
483 
484 	assert_spin_locked(&dev_priv->irq_lock);
485 
486 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
487 		return;
488 
489 	I915_WRITE(SDEIMR, sdeimr);
490 	POSTING_READ(SDEIMR);
491 }
492 
493 static void
494 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
495 		       u32 enable_mask, u32 status_mask)
496 {
497 	i915_reg_t reg = PIPESTAT(pipe);
498 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
499 
500 	assert_spin_locked(&dev_priv->irq_lock);
501 	WARN_ON(!intel_irqs_enabled(dev_priv));
502 
503 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
504 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
505 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
506 		      pipe_name(pipe), enable_mask, status_mask))
507 		return;
508 
509 	if ((pipestat & enable_mask) == enable_mask)
510 		return;
511 
512 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
513 
514 	/* Enable the interrupt, clear any pending status */
515 	pipestat |= enable_mask | status_mask;
516 	I915_WRITE(reg, pipestat);
517 	POSTING_READ(reg);
518 }
519 
520 static void
521 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
522 		        u32 enable_mask, u32 status_mask)
523 {
524 	i915_reg_t reg = PIPESTAT(pipe);
525 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
526 
527 	assert_spin_locked(&dev_priv->irq_lock);
528 	WARN_ON(!intel_irqs_enabled(dev_priv));
529 
530 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
531 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
532 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
533 		      pipe_name(pipe), enable_mask, status_mask))
534 		return;
535 
536 	if ((pipestat & enable_mask) == 0)
537 		return;
538 
539 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
540 
541 	pipestat &= ~enable_mask;
542 	I915_WRITE(reg, pipestat);
543 	POSTING_READ(reg);
544 }
545 
546 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
547 {
548 	u32 enable_mask = status_mask << 16;
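	/*
	 * On PIPESTAT the enable bit for an event sits 16 bits above its
	 * status bit, hence the shift above; the sprite flip-done and PSR
	 * bits handled below are the exceptions to that layout.
	 */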
549 
550 	/*
551 	 * On pipe A we don't support the PSR interrupt yet,
552 	 * on pipe B and C the same bit MBZ.
553 	 */
554 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
555 		return 0;
556 	/*
557 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
558 	 * A the same bit is for perf counters which we don't use either.
559 	 */
560 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
561 		return 0;
562 
563 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
564 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
565 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
566 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
567 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
568 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
569 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
570 
571 	return enable_mask;
572 }
573 
574 void
575 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
576 		     u32 status_mask)
577 {
578 	u32 enable_mask;
579 
580 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
581 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
582 							   status_mask);
583 	else
584 		enable_mask = status_mask << 16;
585 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
586 }
587 
588 void
589 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
590 		      u32 status_mask)
591 {
592 	u32 enable_mask;
593 
594 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
595 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
596 							   status_mask);
597 	else
598 		enable_mask = status_mask << 16;
599 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
600 }
601 
602 /**
603  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
604  * @dev: drm device
605  */
606 static void i915_enable_asle_pipestat(struct drm_device *dev)
607 {
608 	struct drm_i915_private *dev_priv = dev->dev_private;
609 
610 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
611 		return;
612 
613 	spin_lock_irq(&dev_priv->irq_lock);
614 
615 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
616 	if (INTEL_INFO(dev)->gen >= 4)
617 		i915_enable_pipestat(dev_priv, PIPE_A,
618 				     PIPE_LEGACY_BLC_EVENT_STATUS);
619 
620 	spin_unlock_irq(&dev_priv->irq_lock);
621 }
622 
623 /*
624  * This timing diagram depicts the video signal in and
625  * around the vertical blanking period.
626  *
627  * Assumptions about the fictitious mode used in this example:
628  *  vblank_start >= 3
629  *  vsync_start = vblank_start + 1
630  *  vsync_end = vblank_start + 2
631  *  vtotal = vblank_start + 3
632  *
633  *           start of vblank:
634  *           latch double buffered registers
635  *           increment frame counter (ctg+)
636  *           generate start of vblank interrupt (gen4+)
637  *           |
638  *           |          frame start:
639  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
640  *           |          may be shifted forward 1-3 extra lines via PIPECONF
641  *           |          |
642  *           |          |  start of vsync:
643  *           |          |  generate vsync interrupt
644  *           |          |  |
645  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
646  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
647  * ----va---> <-----------------vb--------------------> <--------va-------------
648  *       |          |       <----vs----->                     |
649  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
650  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
651  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
652  *       |          |                                         |
653  *       last visible pixel                                   first visible pixel
654  *                  |                                         increment frame counter (gen3/4)
655  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
656  *
657  * x  = horizontal active
658  * _  = horizontal blanking
659  * hs = horizontal sync
660  * va = vertical active
661  * vb = vertical blanking
662  * vs = vertical sync
663  * vbs = vblank_start (number)
664  *
665  * Summary:
666  * - most events happen at the start of horizontal sync
667  * - frame start happens at the start of horizontal blank, 1-4 lines
668  *   (depending on PIPECONF settings) after the start of vblank
669  * - gen3/4 pixel and frame counter are synchronized with the start
670  *   of horizontal active on the first line of vertical active
671  */
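/*
 * The helpers below build on the timing described above:
 * i915_get_vblank_counter() cooks up a vblank count from the gen3/4 frame
 * and pixel counters, g4x_get_vblank_counter() reads the hardware frame
 * counter directly, and __intel_get_crtc_scanline() /
 * i915_get_crtc_scanoutpos() sample the current scanout position for
 * vblank timestamping.
 */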
672 
673 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
674 {
675 	/* Gen2 doesn't have a hardware frame counter */
676 	return 0;
677 }
678 
679 /* Called from drm generic code, passed a 'crtc', which
680  * we use as a pipe index
681  */
682 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
683 {
684 	struct drm_i915_private *dev_priv = dev->dev_private;
685 	i915_reg_t high_frame, low_frame;
686 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
687 	struct intel_crtc *intel_crtc =
688 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
689 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
690 
691 	htotal = mode->crtc_htotal;
692 	hsync_start = mode->crtc_hsync_start;
693 	vbl_start = mode->crtc_vblank_start;
694 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
695 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
696 
697 	/* Convert to pixel count */
698 	vbl_start *= htotal;
699 
700 	/* Start of vblank event occurs at start of hsync */
701 	vbl_start -= htotal - hsync_start;
702 
703 	high_frame = PIPEFRAME(pipe);
704 	low_frame = PIPEFRAMEPIXEL(pipe);
705 
706 	/*
707 	 * High & low register fields aren't synchronized, so make sure
708 	 * we get a low value that's stable across two reads of the high
709 	 * register.
710 	 */
711 	do {
712 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
713 		low   = I915_READ(low_frame);
714 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
715 	} while (high1 != high2);
716 
717 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
718 	pixel = low & PIPE_PIXEL_MASK;
719 	low >>= PIPE_FRAME_LOW_SHIFT;
720 
721 	/*
722 	 * The frame counter increments at beginning of active.
723 	 * Cook up a vblank counter by also checking the pixel
724 	 * counter against vblank start.
725 	 */
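	/*
	 * Illustrative example, assuming 1080p-like timings: with
	 * htotal = 2200, hsync_start = 2008 and vbl_start = 1080 lines,
	 * vbl_start above becomes 1080 * 2200 - (2200 - 2008) = 2375808
	 * pixels, so once the pixel counter reaches that point we credit
	 * the frame counter with the vblank that has just started.
	 */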
726 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
727 }
728 
729 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
730 {
731 	struct drm_i915_private *dev_priv = dev->dev_private;
732 
733 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
734 }
735 
736 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
737 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
738 {
739 	struct drm_device *dev = crtc->base.dev;
740 	struct drm_i915_private *dev_priv = dev->dev_private;
741 	const struct drm_display_mode *mode = &crtc->base.hwmode;
742 	enum i915_pipe pipe = crtc->pipe;
743 	int position, vtotal;
744 
745 	vtotal = mode->crtc_vtotal;
746 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
747 		vtotal /= 2;
748 
749 	if (IS_GEN2(dev))
750 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
751 	else
752 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
753 
754 	/*
755 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
756 	 * read it just before the start of vblank.  So try it again
757 	 * so we don't accidentally end up spanning a vblank frame
758 	 * increment, causing the pipe_update_end() code to squak at us.
759 	 *
760 	 * The nature of this problem means we can't simply check the ISR
761 	 * bit and return the vblank start value; nor can we use the scanline
762 	 * debug register in the transcoder as it appears to have the same
763 	 * problem.  We may need to extend this to include other platforms,
764 	 * but so far testing only shows the problem on HSW.
765 	 */
766 	if (HAS_DDI(dev) && !position) {
767 		int i, temp;
768 
769 		for (i = 0; i < 100; i++) {
770 			udelay(1);
771 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
772 				DSL_LINEMASK_GEN3;
773 			if (temp != position) {
774 				position = temp;
775 				break;
776 			}
777 		}
778 	}
779 
780 	/*
781 	 * See update_scanline_offset() for the details on the
782 	 * scanline_offset adjustment.
783 	 */
784 	return (position + crtc->scanline_offset) % vtotal;
785 }
786 
787 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
788 				    unsigned int flags, int *vpos, int *hpos,
789 				    ktime_t *stime, ktime_t *etime,
790 				    const struct drm_display_mode *mode)
791 {
792 	struct drm_i915_private *dev_priv = dev->dev_private;
793 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
794 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
795 	int position;
796 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
797 	bool in_vbl = true;
798 	int ret = 0;
799 	unsigned long irqflags;
800 
801 	if (WARN_ON(!mode->crtc_clock)) {
802 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
803 				 "pipe %c\n", pipe_name(pipe));
804 		return 0;
805 	}
806 
807 	htotal = mode->crtc_htotal;
808 	hsync_start = mode->crtc_hsync_start;
809 	vtotal = mode->crtc_vtotal;
810 	vbl_start = mode->crtc_vblank_start;
811 	vbl_end = mode->crtc_vblank_end;
812 
813 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
814 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
815 		vbl_end /= 2;
816 		vtotal /= 2;
817 	}
818 
819 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
820 
821 	/*
822 	 * Lock uncore.lock, as we will do multiple timing critical raw
823 	 * register reads, potentially with preemption disabled, so the
824 	 * following code must not block on uncore.lock.
825 	 */
826 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
827 
828 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
829 
830 	/* Get optional system timestamp before query. */
831 	if (stime)
832 		*stime = ktime_get();
833 
834 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
835 		/* No obvious pixelcount register. Only query vertical
836 		 * scanout position from Display scan line register.
837 		 */
838 		position = __intel_get_crtc_scanline(intel_crtc);
839 	} else {
840 		/* Have access to pixelcount since start of frame.
841 		 * We can split this into vertical and horizontal
842 		 * scanout position.
843 		 */
844 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
845 
846 		/* convert to pixel counts */
847 		vbl_start *= htotal;
848 		vbl_end *= htotal;
849 		vtotal *= htotal;
850 
851 		/*
852 		 * In interlaced modes, the pixel counter counts all pixels,
853 		 * so one field will have htotal more pixels. In order to avoid
854 		 * the reported position from jumping backwards when the pixel
855 		 * counter is beyond the length of the shorter field, just
856 		 * clamp the position to the length of the shorter field. This
857 		 * matches how the scanline counter based position works since
858 		 * the scanline counter doesn't count the two half lines.
859 		 */
860 		if (position >= vtotal)
861 			position = vtotal - 1;
862 
863 		/*
864 		 * Start of vblank interrupt is triggered at start of hsync,
865 		 * just prior to the first active line of vblank. However we
866 		 * consider lines to start at the leading edge of horizontal
867 		 * active. So, should we get here before we've crossed into
868 		 * the horizontal active of the first line in vblank, we would
869 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
870 		 * always add htotal-hsync_start to the current pixel position.
871 		 */
872 		position = (position + htotal - hsync_start) % vtotal;
873 	}
874 
875 	/* Get optional system timestamp after query. */
876 	if (etime)
877 		*etime = ktime_get();
878 
879 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
880 
881 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
882 
883 	in_vbl = position >= vbl_start && position < vbl_end;
884 
885 	/*
886 	 * While in vblank, position will be negative
887 	 * counting up towards 0 at vbl_end. And outside
888 	 * vblank, position will be positive counting
889 	 * up from vbl_end.
890 	 */
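	/*
	 * Illustrative numbers, assuming vbl_start = 1080 and
	 * vbl_end = vtotal = 1125: a raw position of 1100 lies inside
	 * vblank and becomes 1100 - 1125 = -25, i.e. 25 lines before the
	 * end of vblank, while a raw position of 100 stays at 100 lines
	 * past the end of vblank.
	 */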
891 	if (position >= vbl_start)
892 		position -= vbl_end;
893 	else
894 		position += vtotal - vbl_end;
895 
896 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
897 		*vpos = position;
898 		*hpos = 0;
899 	} else {
900 		*vpos = position / htotal;
901 		*hpos = position - (*vpos * htotal);
902 	}
903 
904 	/* In vblank? */
905 	if (in_vbl)
906 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
907 
908 	return ret;
909 }
910 
911 int intel_get_crtc_scanline(struct intel_crtc *crtc)
912 {
913 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
914 	unsigned long irqflags;
915 	int position;
916 
917 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
918 	position = __intel_get_crtc_scanline(crtc);
919 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
920 
921 	return position;
922 }
923 
924 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
925 			      int *max_error,
926 			      struct timeval *vblank_time,
927 			      unsigned flags)
928 {
929 	struct drm_crtc *crtc;
930 
931 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
932 		DRM_ERROR("Invalid crtc %u\n", pipe);
933 		return -EINVAL;
934 	}
935 
936 	/* Get drm_crtc to timestamp: */
937 	crtc = intel_get_crtc_for_pipe(dev, pipe);
938 	if (crtc == NULL) {
939 		DRM_ERROR("Invalid crtc %u\n", pipe);
940 		return -EINVAL;
941 	}
942 
943 	if (!crtc->hwmode.crtc_clock) {
944 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
945 		return -EBUSY;
946 	}
947 
948 	/* Helper routine in DRM core does all the work: */
949 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
950 						     vblank_time, flags,
951 						     &crtc->hwmode);
952 }
953 
954 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
955 {
956 	struct drm_i915_private *dev_priv = dev->dev_private;
957 	u32 busy_up, busy_down, max_avg, min_avg;
958 	u8 new_delay;
959 
960 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
961 
962 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
963 
964 	new_delay = dev_priv->ips.cur_delay;
965 
966 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
967 	busy_up = I915_READ(RCPREVBSYTUPAVG);
968 	busy_down = I915_READ(RCPREVBSYTDNAVG);
969 	max_avg = I915_READ(RCBMAXAVG);
970 	min_avg = I915_READ(RCBMINAVG);
971 
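	/*
	 * Note: as the clamping below implies, a numerically smaller delay
	 * is a higher performance state (max_delay < min_delay), so
	 * decrementing the delay steps the clock up and incrementing it
	 * steps the clock down.
	 */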
972 	/* Handle RCS change request from hw */
973 	if (busy_up > max_avg) {
974 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
975 			new_delay = dev_priv->ips.cur_delay - 1;
976 		if (new_delay < dev_priv->ips.max_delay)
977 			new_delay = dev_priv->ips.max_delay;
978 	} else if (busy_down < min_avg) {
979 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
980 			new_delay = dev_priv->ips.cur_delay + 1;
981 		if (new_delay > dev_priv->ips.min_delay)
982 			new_delay = dev_priv->ips.min_delay;
983 	}
984 
985 	if (ironlake_set_drps(dev, new_delay))
986 		dev_priv->ips.cur_delay = new_delay;
987 
988 	lockmgr(&mchdev_lock, LK_RELEASE);
989 
990 	return;
991 }
992 
993 static void notify_ring(struct intel_engine_cs *engine)
994 {
995 	if (!intel_engine_initialized(engine))
996 		return;
997 
998 	trace_i915_gem_request_notify(engine);
999 	engine->user_interrupts++;
1000 
1001 	wake_up_all(&engine->irq_queue);
1002 }
1003 
1004 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1005 			struct intel_rps_ei *ei)
1006 {
1007 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1008 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1009 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1010 }
1011 
1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1013 			 const struct intel_rps_ei *old,
1014 			 const struct intel_rps_ei *now,
1015 			 int threshold)
1016 {
1017 	u64 time, c0;
1018 	unsigned int mul = 100;
1019 
1020 	if (old->cz_clock == 0)
1021 		return false;
1022 
1023 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1024 		mul <<= 8;
1025 
1026 	time = now->cz_clock - old->cz_clock;
1027 	time *= threshold * dev_priv->czclk_freq;
1028 
1029 	/* Workload can be split between render + media, e.g. SwapBuffers
1030 	 * being blitted in X after being rendered in mesa. To account for
1031 	 * this we need to combine both engines into our activity counter.
1032 	 */
1033 	c0 = now->render_c0 - old->render_c0;
1034 	c0 += now->media_c0 - old->media_c0;
1035 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1036 
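	/*
	 * In effect this asks whether the combined render + media C0
	 * residency reached roughly 'threshold' percent of the elapsed CZ
	 * clock time over this evaluation interval.
	 */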
1037 	return c0 >= time;
1038 }
1039 
1040 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1041 {
1042 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1043 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1044 }
1045 
1046 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1047 {
1048 	struct intel_rps_ei now;
1049 	u32 events = 0;
1050 
1051 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1052 		return 0;
1053 
1054 	vlv_c0_read(dev_priv, &now);
1055 	if (now.cz_clock == 0)
1056 		return 0;
1057 
1058 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1059 		if (!vlv_c0_above(dev_priv,
1060 				  &dev_priv->rps.down_ei, &now,
1061 				  dev_priv->rps.down_threshold))
1062 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1063 		dev_priv->rps.down_ei = now;
1064 	}
1065 
1066 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1067 		if (vlv_c0_above(dev_priv,
1068 				 &dev_priv->rps.up_ei, &now,
1069 				 dev_priv->rps.up_threshold))
1070 			events |= GEN6_PM_RP_UP_THRESHOLD;
1071 		dev_priv->rps.up_ei = now;
1072 	}
1073 
1074 	return events;
1075 }
1076 
1077 static bool any_waiters(struct drm_i915_private *dev_priv)
1078 {
1079 	struct intel_engine_cs *engine;
1080 
1081 	for_each_engine(engine, dev_priv)
1082 		if (engine->irq_refcount)
1083 			return true;
1084 
1085 	return false;
1086 }
1087 
1088 static void gen6_pm_rps_work(struct work_struct *work)
1089 {
1090 	struct drm_i915_private *dev_priv =
1091 		container_of(work, struct drm_i915_private, rps.work);
1092 	bool client_boost;
1093 	int new_delay, adj, min, max;
1094 	u32 pm_iir;
1095 
1096 	spin_lock_irq(&dev_priv->irq_lock);
1097 	/* Speed up work cancellation while disabling RPS interrupts. */
1098 	if (!dev_priv->rps.interrupts_enabled) {
1099 		spin_unlock_irq(&dev_priv->irq_lock);
1100 		return;
1101 	}
1102 
1103 	/*
1104 	 * The RPS work is synced during runtime suspend, we don't require a
1105 	 * wakeref. TODO: instead of disabling the asserts make sure that we
1106 	 * always hold an RPM reference while the work is running.
1107 	 */
1108 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1109 
1110 	pm_iir = dev_priv->rps.pm_iir;
1111 	dev_priv->rps.pm_iir = 0;
1112 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1113 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1114 	client_boost = dev_priv->rps.client_boost;
1115 	dev_priv->rps.client_boost = false;
1116 	spin_unlock_irq(&dev_priv->irq_lock);
1117 
1118 	/* Make sure we didn't queue anything we're not going to process. */
1119 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1120 
1121 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1122 		goto out;
1123 
1124 	mutex_lock(&dev_priv->rps.hw_lock);
1125 
1126 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1127 
1128 	adj = dev_priv->rps.last_adj;
1129 	new_delay = dev_priv->rps.cur_freq;
1130 	min = dev_priv->rps.min_freq_softlimit;
1131 	max = dev_priv->rps.max_freq_softlimit;
1132 
1133 	if (client_boost) {
1134 		new_delay = dev_priv->rps.max_freq_softlimit;
1135 		adj = 0;
1136 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1137 		if (adj > 0)
1138 			adj *= 2;
1139 		else /* CHV needs even encode values */
1140 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1141 		/*
1142 		 * For better performance, jump directly
1143 		 * to RPe if we're below it.
1144 		 */
1145 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1146 			new_delay = dev_priv->rps.efficient_freq;
1147 			adj = 0;
1148 		}
1149 	} else if (any_waiters(dev_priv)) {
1150 		adj = 0;
1151 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1152 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1153 			new_delay = dev_priv->rps.efficient_freq;
1154 		else
1155 			new_delay = dev_priv->rps.min_freq_softlimit;
1156 		adj = 0;
1157 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1158 		if (adj < 0)
1159 			adj *= 2;
1160 		else /* CHV needs even encode values */
1161 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1162 	} else { /* unknown event */
1163 		adj = 0;
1164 	}
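	/*
	 * Sustained up-threshold events therefore ramp adj 1, 2, 4, ...
	 * (2, 4, 8, ... on CHV, which needs even values), so the frequency
	 * climbs progressively faster under constant load; the same applies
	 * in the other direction for repeated down-threshold events.
	 */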
1165 
1166 	dev_priv->rps.last_adj = adj;
1167 
1168 	/* sysfs frequency interfaces may have snuck in while servicing the
1169 	 * interrupt
1170 	 */
1171 	new_delay += adj;
1172 	new_delay = clamp_t(int, new_delay, min, max);
1173 
1174 	intel_set_rps(dev_priv->dev, new_delay);
1175 
1176 	mutex_unlock(&dev_priv->rps.hw_lock);
1177 out:
1178 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1179 }
1180 
1181 
1182 /**
1183  * ivybridge_parity_work - Workqueue function called when a parity error
1184  * interrupt occurred.
1185  * @work: workqueue struct
1186  *
1187  * Doesn't actually do anything except notify userspace. As a consequence of
1188  * this event, userspace should try to remap the bad rows since statistically
1189  * the same row is likely to go bad again.
1190  */
1191 static void ivybridge_parity_work(struct work_struct *work)
1192 {
1193 	struct drm_i915_private *dev_priv =
1194 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1195 	u32 error_status, row, bank, subbank;
1196 	char *parity_event[6];
1197 	uint32_t misccpctl;
1198 	uint8_t slice = 0;
1199 
1200 	/* We must turn off DOP level clock gating to access the L3 registers.
1201 	 * In order to prevent a get/put style interface, acquire struct mutex
1202 	 * any time we access those registers.
1203 	 */
1204 	mutex_lock(&dev_priv->dev->struct_mutex);
1205 
1206 	/* If we've screwed up tracking, just let the interrupt fire again */
1207 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1208 		goto out;
1209 
1210 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1211 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1212 	POSTING_READ(GEN7_MISCCPCTL);
1213 
1214 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1215 		i915_reg_t reg;
1216 
1217 		slice--;
1218 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
1219 			break;
1220 
1221 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1222 
1223 		reg = GEN7_L3CDERRST1(slice);
1224 
1225 		error_status = I915_READ(reg);
1226 		row = GEN7_PARITY_ERROR_ROW(error_status);
1227 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1228 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1229 
1230 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1231 		POSTING_READ(reg);
1232 
1233 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1234 		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1235 		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1236 		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1237 		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1238 		parity_event[5] = NULL;
1239 
1240 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1241 				   KOBJ_CHANGE, parity_event);
1242 
1243 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1244 			  slice, row, bank, subbank);
1245 
1246 		kfree(parity_event[4]);
1247 		kfree(parity_event[3]);
1248 		kfree(parity_event[2]);
1249 		kfree(parity_event[1]);
1250 	}
1251 
1252 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1253 
1254 out:
1255 	WARN_ON(dev_priv->l3_parity.which_slice);
1256 	spin_lock_irq(&dev_priv->irq_lock);
1257 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1258 	spin_unlock_irq(&dev_priv->irq_lock);
1259 
1260 	mutex_unlock(&dev_priv->dev->struct_mutex);
1261 }
1262 
1263 static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
1264 					       u32 iir)
1265 {
1266 	if (!HAS_L3_DPF(dev_priv))
1267 		return;
1268 
1269 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1270 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1271 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1272 
1273 	iir &= GT_PARITY_ERROR(dev_priv);
1274 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1275 		dev_priv->l3_parity.which_slice |= 1 << 1;
1276 
1277 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1278 		dev_priv->l3_parity.which_slice |= 1 << 0;
1279 
1280 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1281 }
1282 
1283 static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1284 			       u32 gt_iir)
1285 {
1286 	if (gt_iir &
1287 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1288 		notify_ring(&dev_priv->engine[RCS]);
1289 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1290 		notify_ring(&dev_priv->engine[VCS]);
1291 }
1292 
1293 static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1294 			       u32 gt_iir)
1295 {
1296 
1297 	if (gt_iir &
1298 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1299 		notify_ring(&dev_priv->engine[RCS]);
1300 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1301 		notify_ring(&dev_priv->engine[VCS]);
1302 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1303 		notify_ring(&dev_priv->engine[BCS]);
1304 
1305 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1306 		      GT_BSD_CS_ERROR_INTERRUPT |
1307 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1308 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1309 
1310 	if (gt_iir & GT_PARITY_ERROR(dev_priv))
1311 		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
1312 }
1313 
1314 static __always_inline void
1315 gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
1316 {
1317 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1318 		notify_ring(engine);
1319 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1320 		tasklet_schedule(&engine->irq_tasklet);
1321 }
1322 
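/*
 * Gen8+ splits the GT interrupts across four IIR banks: bank 0 carries the
 * render and blitter engines, bank 1 the two video decode engines, bank 2
 * the PM/RPS events and bank 3 the video enhancement engine.
 * gen8_gt_irq_ack() reads and clears whichever banks the master control
 * register reports as pending, and gen8_gt_irq_handler() then dispatches
 * the saved IIR values.
 */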
1323 static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
1324 				   u32 master_ctl,
1325 				   u32 gt_iir[4])
1326 {
1327 
1328 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1329 		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
1330 		if (gt_iir[0]) {
1331 			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
1332 		} else
1333 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1334 	}
1335 
1336 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1337 		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
1338 		if (gt_iir[1]) {
1339 			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
1340 		} else
1341 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1342 	}
1343 
1344 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1345 		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
1346 		if (gt_iir[3]) {
1347 			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
1348 		} else
1349 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1350 	}
1351 
1352 	if (master_ctl & GEN8_GT_PM_IRQ) {
1353 		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
1354 		if (gt_iir[2] & dev_priv->pm_rps_events) {
1355 			I915_WRITE_FW(GEN8_GT_IIR(2),
1356 				      gt_iir[2] & dev_priv->pm_rps_events);
1357 		} else
1358 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1359 	}
1360 
1361 }
1362 
1363 static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1364 				u32 gt_iir[4])
1365 {
1366 	if (gt_iir[0]) {
1367 		gen8_cs_irq_handler(&dev_priv->engine[RCS],
1368 				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
1369 		gen8_cs_irq_handler(&dev_priv->engine[BCS],
1370 				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
1371 	}
1372 
1373 	if (gt_iir[1]) {
1374 		gen8_cs_irq_handler(&dev_priv->engine[VCS],
1375 				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
1376 		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
1377 				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
1378 	}
1379 
1380 	if (gt_iir[3])
1381 		gen8_cs_irq_handler(&dev_priv->engine[VECS],
1382 				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);
1383 
1384 	if (gt_iir[2] & dev_priv->pm_rps_events)
1385 		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
1386 }
1387 
1388 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1389 {
1390 	switch (port) {
1391 	case PORT_A:
1392 		return val & PORTA_HOTPLUG_LONG_DETECT;
1393 	case PORT_B:
1394 		return val & PORTB_HOTPLUG_LONG_DETECT;
1395 	case PORT_C:
1396 		return val & PORTC_HOTPLUG_LONG_DETECT;
1397 	default:
1398 		return false;
1399 	}
1400 }
1401 
1402 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1403 {
1404 	switch (port) {
1405 	case PORT_E:
1406 		return val & PORTE_HOTPLUG_LONG_DETECT;
1407 	default:
1408 		return false;
1409 	}
1410 }
1411 
1412 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1413 {
1414 	switch (port) {
1415 	case PORT_A:
1416 		return val & PORTA_HOTPLUG_LONG_DETECT;
1417 	case PORT_B:
1418 		return val & PORTB_HOTPLUG_LONG_DETECT;
1419 	case PORT_C:
1420 		return val & PORTC_HOTPLUG_LONG_DETECT;
1421 	case PORT_D:
1422 		return val & PORTD_HOTPLUG_LONG_DETECT;
1423 	default:
1424 		return false;
1425 	}
1426 }
1427 
1428 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1429 {
1430 	switch (port) {
1431 	case PORT_A:
1432 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1433 	default:
1434 		return false;
1435 	}
1436 }
1437 
1438 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1439 {
1440 	switch (port) {
1441 	case PORT_B:
1442 		return val & PORTB_HOTPLUG_LONG_DETECT;
1443 	case PORT_C:
1444 		return val & PORTC_HOTPLUG_LONG_DETECT;
1445 	case PORT_D:
1446 		return val & PORTD_HOTPLUG_LONG_DETECT;
1447 	default:
1448 		return false;
1449 	}
1450 }
1451 
1452 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1453 {
1454 	switch (port) {
1455 	case PORT_B:
1456 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1457 	case PORT_C:
1458 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1459 	case PORT_D:
1460 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1461 	default:
1462 		return false;
1463 	}
1464 }
1465 
1466 /*
1467  * Get a bit mask of pins that have triggered, and which ones may be long.
1468  * This can be called multiple times with the same masks to accumulate
1469  * hotplug detection results from several registers.
1470  *
1471  * Note that the caller is expected to zero out the masks initially.
1472  */
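/*
 * A typical caller is i9xx_hpd_irq_handler() further down: it zeroes
 * pin_mask/long_mask, passes the PORT_HOTPLUG_STAT trigger bits as both the
 * hotplug_trigger and dig_hotplug_reg arguments, and then hands the
 * accumulated masks to intel_hpd_irq_handler().
 */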
1473 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1474 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1475 			     const u32 hpd[HPD_NUM_PINS],
1476 			     bool long_pulse_detect(enum port port, u32 val))
1477 {
1478 	enum port port;
1479 	int i;
1480 
1481 	for_each_hpd_pin(i) {
1482 		if ((hpd[i] & hotplug_trigger) == 0)
1483 			continue;
1484 
1485 		*pin_mask |= BIT(i);
1486 
1487 		if (!intel_hpd_pin_to_port(i, &port))
1488 			continue;
1489 
1490 		if (long_pulse_detect(port, dig_hotplug_reg))
1491 			*long_mask |= BIT(i);
1492 	}
1493 
1494 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1495 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1496 
1497 }
1498 
1499 static void gmbus_irq_handler(struct drm_device *dev)
1500 {
1501 	struct drm_i915_private *dev_priv = dev->dev_private;
1502 
1503 	wake_up_all(&dev_priv->gmbus_wait_queue);
1504 }
1505 
1506 static void dp_aux_irq_handler(struct drm_device *dev)
1507 {
1508 	struct drm_i915_private *dev_priv = dev->dev_private;
1509 
1510 	wake_up_all(&dev_priv->gmbus_wait_queue);
1511 }
1512 
1513 #if defined(CONFIG_DEBUG_FS)
1514 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1515 					 uint32_t crc0, uint32_t crc1,
1516 					 uint32_t crc2, uint32_t crc3,
1517 					 uint32_t crc4)
1518 {
1519 	struct drm_i915_private *dev_priv = dev->dev_private;
1520 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1521 	struct intel_pipe_crc_entry *entry;
1522 	int head, tail;
1523 
1524 	spin_lock(&pipe_crc->lock);
1525 
1526 	if (!pipe_crc->entries) {
1527 		spin_unlock(&pipe_crc->lock);
1528 		DRM_DEBUG_KMS("spurious interrupt\n");
1529 		return;
1530 	}
1531 
1532 	head = pipe_crc->head;
1533 	tail = pipe_crc->tail;
1534 
1535 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1536 		spin_unlock(&pipe_crc->lock);
1537 		DRM_ERROR("CRC buffer overflowing\n");
1538 		return;
1539 	}
1540 
1541 	entry = &pipe_crc->entries[head];
1542 
1543 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1544 	entry->crc[0] = crc0;
1545 	entry->crc[1] = crc1;
1546 	entry->crc[2] = crc2;
1547 	entry->crc[3] = crc3;
1548 	entry->crc[4] = crc4;
1549 
1550 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1551 	pipe_crc->head = head;
1552 
1553 	spin_unlock(&pipe_crc->lock);
1554 
1555 	wake_up_interruptible(&pipe_crc->wq);
1556 }
1557 #else
1558 static inline void
1559 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1560 			     uint32_t crc0, uint32_t crc1,
1561 			     uint32_t crc2, uint32_t crc3,
1562 			     uint32_t crc4) {}
1563 #endif
1564 
1565 
1566 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1567 {
1568 	struct drm_i915_private *dev_priv = dev->dev_private;
1569 
1570 	display_pipe_crc_irq_handler(dev, pipe,
1571 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1572 				     0, 0, 0, 0);
1573 }
1574 
1575 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1576 {
1577 	struct drm_i915_private *dev_priv = dev->dev_private;
1578 
1579 	display_pipe_crc_irq_handler(dev, pipe,
1580 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1581 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1582 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1583 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1584 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1585 }
1586 
1587 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1588 {
1589 	struct drm_i915_private *dev_priv = dev->dev_private;
1590 	uint32_t res1, res2;
1591 
1592 	if (INTEL_INFO(dev)->gen >= 3)
1593 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1594 	else
1595 		res1 = 0;
1596 
1597 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1598 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1599 	else
1600 		res2 = 0;
1601 
1602 	display_pipe_crc_irq_handler(dev, pipe,
1603 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1604 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1605 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1606 				     res1, res2);
1607 }
1608 
1609 /* The RPS events need forcewake, so we add them to a work queue and mask their
1610  * IMR bits until the work is done. Other interrupts can be processed without
1611  * the work queue. */
1612 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1613 {
1614 	if (pm_iir & dev_priv->pm_rps_events) {
1615 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1616 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1617 		if (dev_priv->rps.interrupts_enabled) {
1618 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1619 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1620 		}
1621 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1622 	}
1623 
1624 	if (INTEL_INFO(dev_priv)->gen >= 8)
1625 		return;
1626 
1627 	if (HAS_VEBOX(dev_priv)) {
1628 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1629 			notify_ring(&dev_priv->engine[VECS]);
1630 
1631 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1632 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1633 	}
1634 }
1635 
1636 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1637 {
1638 	if (!drm_handle_vblank(dev, pipe))
1639 		return false;
1640 
1641 	return true;
1642 }
1643 
1644 static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
1645 					u32 pipe_stats[I915_MAX_PIPES])
1646 {
1647 	struct drm_i915_private *dev_priv = dev->dev_private;
1648 	int pipe;
1649 
1650 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1651 
1652 	if (!dev_priv->display_irqs_enabled) {
1653 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1654 		return;
1655 	}
1656 
1657 	for_each_pipe(dev_priv, pipe) {
1658 		i915_reg_t reg;
1659 		u32 mask, iir_bit = 0;
1660 
1661 		/*
1662 		 * PIPESTAT bits get signalled even when the interrupt is
1663 		 * disabled with the mask bits, and some of the status bits do
1664 		 * not generate interrupts at all (like the underrun bit). Hence
1665 		 * we need to be careful that we only handle what we want to
1666 		 * handle.
1667 		 */
1668 
1669 		/* FIFO underruns are filtered in the underrun handler. */
1670 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1671 
1672 		switch (pipe) {
1673 		case PIPE_A:
1674 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1675 			break;
1676 		case PIPE_B:
1677 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1678 			break;
1679 		case PIPE_C:
1680 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1681 			break;
1682 		}
1683 		if (iir & iir_bit)
1684 			mask |= dev_priv->pipestat_irq_mask[pipe];
1685 
1686 		if (!mask)
1687 			continue;
1688 
1689 		reg = PIPESTAT(pipe);
1690 		mask |= PIPESTAT_INT_ENABLE_MASK;
1691 		pipe_stats[pipe] = I915_READ(reg) & mask;
1692 
1693 		/*
1694 		 * Clear the PIPE*STAT regs before the IIR
1695 		 */
1696 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1697 					PIPESTAT_INT_STATUS_MASK))
1698 			I915_WRITE(reg, pipe_stats[pipe]);
1699 	}
1700 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1701 }
1702 
1703 static void valleyview_pipestat_irq_handler(struct drm_device *dev,
1704 					    u32 pipe_stats[I915_MAX_PIPES])
1705 {
1706 	struct drm_i915_private *dev_priv = to_i915(dev);
1707 	enum i915_pipe pipe;
1708 
1709 	for_each_pipe(dev_priv, pipe) {
1710 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1711 		    intel_pipe_handle_vblank(dev, pipe))
1712 			intel_check_page_flip(dev, pipe);
1713 
1714 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1715 			intel_prepare_page_flip(dev, pipe);
1716 			intel_finish_page_flip(dev, pipe);
1717 		}
1718 
1719 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1720 			i9xx_pipe_crc_irq_handler(dev, pipe);
1721 
1722 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1723 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1724 	}
1725 
1726 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1727 		gmbus_irq_handler(dev);
1728 }
1729 
1730 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1731 {
1732 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1733 
1734 	if (hotplug_status)
1735 		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1736 
1737 	return hotplug_status;
1738 }
1739 
1740 static void i9xx_hpd_irq_handler(struct drm_device *dev,
1741 				 u32 hotplug_status)
1742 {
1743 	u32 pin_mask = 0, long_mask = 0;
1744 
1745 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1746 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1747 
1748 		if (hotplug_trigger) {
1749 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1750 					   hotplug_trigger, hpd_status_g4x,
1751 					   i9xx_port_hotplug_long_detect);
1752 
1753 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1754 		}
1755 
1756 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1757 			dp_aux_irq_handler(dev);
1758 	} else {
1759 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1760 
1761 		if (hotplug_trigger) {
1762 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1763 					   hotplug_trigger, hpd_status_i915,
1764 					   i9xx_port_hotplug_long_detect);
1765 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1766 		}
1767 	}
1768 }
1769 
1770 static irqreturn_t valleyview_irq_handler(void *arg)
1771 {
1772 	struct drm_device *dev = arg;
1773 	struct drm_i915_private *dev_priv = dev->dev_private;
1774 
1775 	if (!intel_irqs_enabled(dev_priv))
1776 		return IRQ_NONE;
1777 
1778 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1779 	disable_rpm_wakeref_asserts(dev_priv);
1780 
1781 	do {
1782 		u32 iir, gt_iir, pm_iir;
1783 		u32 pipe_stats[I915_MAX_PIPES] = {};
1784 		u32 hotplug_status = 0;
1785 		u32 ier = 0;
1786 
1787 		gt_iir = I915_READ(GTIIR);
1788 		pm_iir = I915_READ(GEN6_PMIIR);
1789 		iir = I915_READ(VLV_IIR);
1790 
1791 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1792 			break;
1793 
1794 
1795 		/*
1796 		 * Theory on interrupt generation, based on empirical evidence:
1797 		 *
1798 		 * x = ((VLV_IIR & VLV_IER) ||
1799 		 *      (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1800 		 *       (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1801 		 *
1802 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1803 		 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1804 		 * guarantee the CPU interrupt will be raised again even if we
1805 		 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1806 		 * bits this time around.
1807 		 */
1808 		I915_WRITE(VLV_MASTER_IER, 0);
1809 		ier = I915_READ(VLV_IER);
1810 		I915_WRITE(VLV_IER, 0);
1811 
1812 		if (gt_iir)
1813 			I915_WRITE(GTIIR, gt_iir);
1814 		if (pm_iir)
1815 			I915_WRITE(GEN6_PMIIR, pm_iir);
1816 
1817 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1818 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1819 
1820 		/* Call regardless, as some status bits might not be
1821 		 * signalled in iir */
1822 		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
1823 
1824 		/*
1825 		 * VLV_IIR is single buffered, and reflects the level
1826 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1827 		 */
1828 		if (iir)
1829 			I915_WRITE(VLV_IIR, iir);
1830 
1831 		I915_WRITE(VLV_IER, ier);
1832 		I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1833 		POSTING_READ(VLV_MASTER_IER);
1834 
1835 		if (gt_iir)
1836 			snb_gt_irq_handler(dev_priv, gt_iir);
1837 		if (pm_iir)
1838 			gen6_rps_irq_handler(dev_priv, pm_iir);
1839 
1840 		if (hotplug_status)
1841 			i9xx_hpd_irq_handler(dev, hotplug_status);
1842 
1843 		valleyview_pipestat_irq_handler(dev, pipe_stats);
1844 	} while (0);
1845 
1846 	enable_rpm_wakeref_asserts(dev_priv);
1847 
1848 }
1849 
1850 static irqreturn_t cherryview_irq_handler(void *arg)
1851 {
1852 	struct drm_device *dev = arg;
1853 	struct drm_i915_private *dev_priv = dev->dev_private;
1854 
1855 	if (!intel_irqs_enabled(dev_priv))
1856 		return IRQ_NONE;
1857 
1858 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1859 	disable_rpm_wakeref_asserts(dev_priv);
1860 
1861 	do {
1862 		u32 master_ctl, iir;
1863 		u32 gt_iir[4] = {};
1864 		u32 pipe_stats[I915_MAX_PIPES] = {};
1865 		u32 hotplug_status = 0;
1866 		u32 ier = 0;
1867 
1868 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1869 		iir = I915_READ(VLV_IIR);
1870 
1871 		if (master_ctl == 0 && iir == 0)
1872 			break;
1873 
1874 
1875 		/*
1876 		 * Theory on interrupt generation, based on empirical evidence:
1877 		 *
1878 		 * x = ((VLV_IIR & VLV_IER) ||
1879 		 *      ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1880 		 *       (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1881 		 *
1882 		 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1883 		 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1884 		 * guarantee the CPU interrupt will be raised again even if we
1885 		 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1886 		 * bits this time around.
1887 		 */
1888 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1889 		ier = I915_READ(VLV_IER);
1890 		I915_WRITE(VLV_IER, 0);
1891 
1892 		gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1893 
1894 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
1895 			hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1896 
1897 		/* Call regardless, as some status bits might not be
1898 		 * signalled in iir */
1899 		valleyview_pipestat_irq_ack(dev, iir, pipe_stats);
1900 
1901 		/*
1902 		 * VLV_IIR is single buffered, and reflects the level
1903 		 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1904 		 */
1905 		if (iir)
1906 			I915_WRITE(VLV_IIR, iir);
1907 
1908 		I915_WRITE(VLV_IER, ier);
1909 		I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1910 		POSTING_READ(GEN8_MASTER_IRQ);
1911 
1912 		gen8_gt_irq_handler(dev_priv, gt_iir);
1913 
1914 		if (hotplug_status)
1915 			i9xx_hpd_irq_handler(dev, hotplug_status);
1916 
1917 		valleyview_pipestat_irq_handler(dev, pipe_stats);
1918 	} while (0);
1919 
1920 	enable_rpm_wakeref_asserts(dev_priv);
1921 
1922 }
1923 
1924 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1925 				const u32 hpd[HPD_NUM_PINS])
1926 {
1927 	struct drm_i915_private *dev_priv = to_i915(dev);
1928 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1929 
1930 	/*
1931 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1932 	 * unless we touch the hotplug register, even if hotplug_trigger is
1933 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1934 	 * errors.
1935 	 */
1936 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1937 	if (!hotplug_trigger) {
1938 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1939 			PORTD_HOTPLUG_STATUS_MASK |
1940 			PORTC_HOTPLUG_STATUS_MASK |
1941 			PORTB_HOTPLUG_STATUS_MASK;
1942 		dig_hotplug_reg &= ~mask;
1943 	}
1944 
1945 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1946 	if (!hotplug_trigger)
1947 		return;
1948 
1949 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1950 			   dig_hotplug_reg, hpd,
1951 			   pch_port_hotplug_long_detect);
1952 
1953 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
1954 }
1955 
1956 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1957 {
1958 	struct drm_i915_private *dev_priv = dev->dev_private;
1959 	int pipe;
1960 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1961 
1962 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1963 
1964 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1965 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1966 			       SDE_AUDIO_POWER_SHIFT);
1967 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1968 				 port_name(port));
1969 	}
1970 
1971 	if (pch_iir & SDE_AUX_MASK)
1972 		dp_aux_irq_handler(dev);
1973 
1974 	if (pch_iir & SDE_GMBUS)
1975 		gmbus_irq_handler(dev);
1976 
1977 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1978 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1979 
1980 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1981 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1982 
1983 	if (pch_iir & SDE_POISON)
1984 		DRM_ERROR("PCH poison interrupt\n");
1985 
1986 	if (pch_iir & SDE_FDI_MASK)
1987 		for_each_pipe(dev_priv, pipe)
1988 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1989 					 pipe_name(pipe),
1990 					 I915_READ(FDI_RX_IIR(pipe)));
1991 
1992 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1993 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1994 
1995 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1996 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1997 
1998 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1999 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2000 
2001 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2002 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2003 }
2004 
2005 static void ivb_err_int_handler(struct drm_device *dev)
2006 {
2007 	struct drm_i915_private *dev_priv = dev->dev_private;
2008 	u32 err_int = I915_READ(GEN7_ERR_INT);
2009 	enum i915_pipe pipe;
2010 
2011 	if (err_int & ERR_INT_POISON)
2012 		DRM_ERROR("Poison interrupt\n");
2013 
2014 	for_each_pipe(dev_priv, pipe) {
2015 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2016 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2017 
2018 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2019 			if (IS_IVYBRIDGE(dev))
2020 				ivb_pipe_crc_irq_handler(dev, pipe);
2021 			else
2022 				hsw_pipe_crc_irq_handler(dev, pipe);
2023 		}
2024 	}
2025 
2026 	I915_WRITE(GEN7_ERR_INT, err_int);
2027 }
2028 
2029 static void cpt_serr_int_handler(struct drm_device *dev)
2030 {
2031 	struct drm_i915_private *dev_priv = dev->dev_private;
2032 	u32 serr_int = I915_READ(SERR_INT);
2033 
2034 	if (serr_int & SERR_INT_POISON)
2035 		DRM_ERROR("PCH poison interrupt\n");
2036 
2037 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2038 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2039 
2040 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2041 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2042 
2043 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2044 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2045 
2046 	I915_WRITE(SERR_INT, serr_int);
2047 }
2048 
2049 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2050 {
2051 	struct drm_i915_private *dev_priv = dev->dev_private;
2052 	int pipe;
2053 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2054 
2055 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
2056 
2057 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2058 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2059 			       SDE_AUDIO_POWER_SHIFT_CPT);
2060 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2061 				 port_name(port));
2062 	}
2063 
2064 	if (pch_iir & SDE_AUX_MASK_CPT)
2065 		dp_aux_irq_handler(dev);
2066 
2067 	if (pch_iir & SDE_GMBUS_CPT)
2068 		gmbus_irq_handler(dev);
2069 
2070 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2071 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2072 
2073 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2074 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2075 
2076 	if (pch_iir & SDE_FDI_MASK_CPT)
2077 		for_each_pipe(dev_priv, pipe)
2078 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2079 					 pipe_name(pipe),
2080 					 I915_READ(FDI_RX_IIR(pipe)));
2081 
2082 	if (pch_iir & SDE_ERROR_CPT)
2083 		cpt_serr_int_handler(dev);
2084 }
2085 
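/*
 * On SPT+ the hotplug status for ports A-D lives in PCH_PORT_HOTPLUG while
 * port E is reported via PCH_PORT_HOTPLUG2, so both registers are consulted
 * and the detected pins merged before being dispatched.
 */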
2086 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2087 {
2088 	struct drm_i915_private *dev_priv = dev->dev_private;
2089 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2090 		~SDE_PORTE_HOTPLUG_SPT;
2091 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2092 	u32 pin_mask = 0, long_mask = 0;
2093 
2094 	if (hotplug_trigger) {
2095 		u32 dig_hotplug_reg;
2096 
2097 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2098 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2099 
2100 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2101 				   dig_hotplug_reg, hpd_spt,
2102 				   spt_port_hotplug_long_detect);
2103 	}
2104 
2105 	if (hotplug2_trigger) {
2106 		u32 dig_hotplug_reg;
2107 
2108 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2109 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2110 
2111 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2112 				   dig_hotplug_reg, hpd_spt,
2113 				   spt_port_hotplug2_long_detect);
2114 	}
2115 
2116 	if (pin_mask)
2117 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
2118 
2119 	if (pch_iir & SDE_GMBUS_CPT)
2120 		gmbus_irq_handler(dev);
2121 }
2122 
2123 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2124 				const u32 hpd[HPD_NUM_PINS])
2125 {
2126 	struct drm_i915_private *dev_priv = to_i915(dev);
2127 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2128 
2129 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2130 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2131 
2132 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2133 			   dig_hotplug_reg, hpd,
2134 			   ilk_port_hotplug_long_detect);
2135 
2136 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2137 }
2138 
2139 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2140 {
2141 	struct drm_i915_private *dev_priv = dev->dev_private;
2142 	enum i915_pipe pipe;
2143 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2144 
2145 	if (hotplug_trigger)
2146 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2147 
2148 	if (de_iir & DE_AUX_CHANNEL_A)
2149 		dp_aux_irq_handler(dev);
2150 
2151 	if (de_iir & DE_GSE)
2152 		intel_opregion_asle_intr(dev);
2153 
2154 	if (de_iir & DE_POISON)
2155 		DRM_ERROR("Poison interrupt\n");
2156 
2157 	for_each_pipe(dev_priv, pipe) {
2158 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2159 		    intel_pipe_handle_vblank(dev, pipe))
2160 			intel_check_page_flip(dev, pipe);
2161 
2162 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2163 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2164 
2165 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2166 			i9xx_pipe_crc_irq_handler(dev, pipe);
2167 
2168 		/* plane/pipes map 1:1 on ilk+ */
2169 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2170 			intel_prepare_page_flip(dev, pipe);
2171 			intel_finish_page_flip_plane(dev, pipe);
2172 		}
2173 	}
2174 
2175 	/* check event from PCH */
2176 	if (de_iir & DE_PCH_EVENT) {
2177 		u32 pch_iir = I915_READ(SDEIIR);
2178 
2179 		if (HAS_PCH_CPT(dev))
2180 			cpt_irq_handler(dev, pch_iir);
2181 		else
2182 			ibx_irq_handler(dev, pch_iir);
2183 
2184 		/* should clear the PCH hotplug event before clearing the CPU irq */
2185 		I915_WRITE(SDEIIR, pch_iir);
2186 	}
2187 
2188 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2189 		ironlake_rps_change_irq_handler(dev);
2190 }
2191 
2192 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2193 {
2194 	struct drm_i915_private *dev_priv = dev->dev_private;
2195 	enum i915_pipe pipe;
2196 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2197 
2198 	if (hotplug_trigger)
2199 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2200 
2201 	if (de_iir & DE_ERR_INT_IVB)
2202 		ivb_err_int_handler(dev);
2203 
2204 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2205 		dp_aux_irq_handler(dev);
2206 
2207 	if (de_iir & DE_GSE_IVB)
2208 		intel_opregion_asle_intr(dev);
2209 
2210 	for_each_pipe(dev_priv, pipe) {
2211 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2212 		    intel_pipe_handle_vblank(dev, pipe))
2213 			intel_check_page_flip(dev, pipe);
2214 
2215 		/* plane/pipes map 1:1 on ilk+ */
2216 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2217 			intel_prepare_page_flip(dev, pipe);
2218 			intel_finish_page_flip_plane(dev, pipe);
2219 		}
2220 	}
2221 
2222 	/* check event from PCH */
2223 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2224 		u32 pch_iir = I915_READ(SDEIIR);
2225 
2226 		cpt_irq_handler(dev, pch_iir);
2227 
2228 		/* clear the PCH hotplug event before clearing the CPU irq */
2229 		I915_WRITE(SDEIIR, pch_iir);
2230 	}
2231 }
2232 
2233 /*
2234  * To handle irqs with the minimum potential races with fresh interrupts, we:
2235  * 1 - Disable Master Interrupt Control.
2236  * 2 - Find the source(s) of the interrupt.
2237  * 3 - Clear the Interrupt Identity bits (IIR).
2238  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2239  * 5 - Re-enable Master Interrupt Control.
2240  */
2241 static irqreturn_t ironlake_irq_handler(void *arg)
2242 {
2243 	struct drm_device *dev = arg;
2244 	struct drm_i915_private *dev_priv = dev->dev_private;
2245 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2246 
2247 	if (!intel_irqs_enabled(dev_priv))
2248 		return IRQ_NONE;
2249 
2250 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2251 	disable_rpm_wakeref_asserts(dev_priv);
2252 
2253 	/* disable master interrupt before clearing iir  */
2254 	de_ier = I915_READ(DEIER);
2255 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2256 	POSTING_READ(DEIER);
2257 
2258 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2259 	 * interrupts will be stored on its back queue, and then we'll be
2260 	 * able to process them after we restore SDEIER (as soon as we restore
2261 	 * it, we'll get an interrupt if SDEIIR still has something to process
2262 	 * due to its back queue). */
2263 	if (!HAS_PCH_NOP(dev)) {
2264 		sde_ier = I915_READ(SDEIER);
2265 		I915_WRITE(SDEIER, 0);
2266 		POSTING_READ(SDEIER);
2267 	}
2268 
2269 	/* Find, clear, then process each source of interrupt */
2270 
2271 	gt_iir = I915_READ(GTIIR);
2272 	if (gt_iir) {
2273 		I915_WRITE(GTIIR, gt_iir);
2274 		if (INTEL_INFO(dev)->gen >= 6)
2275 			snb_gt_irq_handler(dev_priv, gt_iir);
2276 		else
2277 			ilk_gt_irq_handler(dev_priv, gt_iir);
2278 	}
2279 
2280 	de_iir = I915_READ(DEIIR);
2281 	if (de_iir) {
2282 		I915_WRITE(DEIIR, de_iir);
2283 		if (INTEL_INFO(dev)->gen >= 7)
2284 			ivb_display_irq_handler(dev, de_iir);
2285 		else
2286 			ilk_display_irq_handler(dev, de_iir);
2287 	}
2288 
2289 	if (INTEL_INFO(dev)->gen >= 6) {
2290 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2291 		if (pm_iir) {
2292 			I915_WRITE(GEN6_PMIIR, pm_iir);
2293 			gen6_rps_irq_handler(dev_priv, pm_iir);
2294 		}
2295 	}
2296 
2297 	I915_WRITE(DEIER, de_ier);
2298 	POSTING_READ(DEIER);
2299 	if (!HAS_PCH_NOP(dev)) {
2300 		I915_WRITE(SDEIER, sde_ier);
2301 		POSTING_READ(SDEIER);
2302 	}
2303 
2304 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2305 	enable_rpm_wakeref_asserts(dev_priv);
2306 
2307 }
2308 
2309 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2310 				const u32 hpd[HPD_NUM_PINS])
2311 {
2312 	struct drm_i915_private *dev_priv = to_i915(dev);
2313 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2314 
2315 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2316 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2317 
2318 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2319 			   dig_hotplug_reg, hpd,
2320 			   bxt_port_hotplug_long_detect);
2321 
2322 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2323 }
2324 
2325 static irqreturn_t
2326 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2327 {
2328 	struct drm_device *dev = dev_priv->dev;
2329 	u32 iir;
2330 	enum i915_pipe pipe;
2331 
2332 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2333 		iir = I915_READ(GEN8_DE_MISC_IIR);
2334 		if (iir) {
2335 			I915_WRITE(GEN8_DE_MISC_IIR, iir);
2336 			if (iir & GEN8_DE_MISC_GSE)
2337 				intel_opregion_asle_intr(dev);
2338 			else
2339 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2340 		}
2341 		else
2342 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2343 	}
2344 
2345 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2346 		iir = I915_READ(GEN8_DE_PORT_IIR);
2347 		if (iir) {
2348 			u32 tmp_mask;
2349 			bool found = false;
2350 
2351 			I915_WRITE(GEN8_DE_PORT_IIR, iir);
2352 
2353 			tmp_mask = GEN8_AUX_CHANNEL_A;
2354 			if (INTEL_INFO(dev_priv)->gen >= 9)
2355 				tmp_mask |= GEN9_AUX_CHANNEL_B |
2356 					    GEN9_AUX_CHANNEL_C |
2357 					    GEN9_AUX_CHANNEL_D;
2358 
2359 			if (iir & tmp_mask) {
2360 				dp_aux_irq_handler(dev);
2361 				found = true;
2362 			}
2363 
2364 			if (IS_BROXTON(dev_priv)) {
2365 				tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2366 				if (tmp_mask) {
2367 					bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt);
2368 					found = true;
2369 				}
2370 			} else if (IS_BROADWELL(dev_priv)) {
2371 				tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2372 				if (tmp_mask) {
2373 					ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw);
2374 					found = true;
2375 				}
2376 			}
2377 
2378 			if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) {
2379 				gmbus_irq_handler(dev);
2380 				found = true;
2381 			}
2382 
2383 			if (!found)
2384 				DRM_ERROR("Unexpected DE Port interrupt\n");
2385 		}
2386 		else
2387 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2388 	}
2389 
2390 	for_each_pipe(dev_priv, pipe) {
2391 		u32 flip_done, fault_errors;
2392 
2393 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2394 			continue;
2395 
2396 		iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2397 		if (!iir) {
2398 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2399 			continue;
2400 		}
2401 
2402 		I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2403 
2404 		if (iir & GEN8_PIPE_VBLANK &&
2405 		    intel_pipe_handle_vblank(dev, pipe))
2406 			intel_check_page_flip(dev, pipe);
2407 
2408 		flip_done = iir;
2409 		if (INTEL_INFO(dev_priv)->gen >= 9)
2410 			flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2411 		else
2412 			flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2413 
2414 		if (flip_done) {
2415 			intel_prepare_page_flip(dev, pipe);
2416 			intel_finish_page_flip_plane(dev, pipe);
2417 		}
2418 
2419 		if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2420 			hsw_pipe_crc_irq_handler(dev, pipe);
2421 
2422 		if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2423 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2424 
2425 		fault_errors = iir;
2426 		if (INTEL_INFO(dev_priv)->gen >= 9)
2427 			fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2428 		else
2429 			fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2430 
2431 		if (fault_errors)
2432 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2433 				  pipe_name(pipe),
2434 				  fault_errors);
2435 	}
2436 
2437 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2438 	    master_ctl & GEN8_DE_PCH_IRQ) {
2439 		/*
2440 		 * FIXME(BDW): Assume for now that the new interrupt handling
2441 		 * scheme also closed the SDE interrupt handling race we've seen
2442 		 * on older pch-split platforms. But this needs testing.
2443 		 */
2444 		iir = I915_READ(SDEIIR);
2445 		if (iir) {
2446 			I915_WRITE(SDEIIR, iir);
2447 
2448 			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2449 				spt_irq_handler(dev, iir);
2450 			else
2451 				cpt_irq_handler(dev, iir);
2452 		} else {
2453 			/*
2454 			 * Like on previous PCH there seems to be something
2455 			 * fishy going on with forwarding PCH interrupts.
2456 			 */
2457 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2458 		}
2459 	}
2460 
2461 }
2462 
2463 static irqreturn_t gen8_irq_handler(void *arg)
2464 {
2465 	struct drm_device *dev = arg;
2466 	struct drm_i915_private *dev_priv = dev->dev_private;
2467 	u32 master_ctl;
2468 	u32 gt_iir[4] = {};
2469 
2470 	if (!intel_irqs_enabled(dev_priv))
2471 		return IRQ_NONE;
2472 
2473 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2474 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2475 	if (!master_ctl)
2476 		return IRQ_NONE;
2477 
2478 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2479 
2480 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2481 	disable_rpm_wakeref_asserts(dev_priv);
2482 
2483 	/* Find, clear, then process each source of interrupt */
2484 	gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2485 	gen8_gt_irq_handler(dev_priv, gt_iir);
2486 	gen8_de_irq_handler(dev_priv, master_ctl);
2487 
2488 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2489 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2490 
2491 	enable_rpm_wakeref_asserts(dev_priv);
2492 
2493 }
2494 
2495 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2496 			       bool reset_completed)
2497 {
2498 	struct intel_engine_cs *engine;
2499 
2500 	/*
2501 	 * Notify all waiters for GPU completion events that reset state has
2502 	 * been changed, and that they need to restart their wait after
2503 	 * checking for potential errors (and bail out to drop locks if there is
2504 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2505 	 */
2506 
2507 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2508 	for_each_engine(engine, dev_priv)
2509 		wake_up_all(&engine->irq_queue);
2510 
2511 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2512 	wake_up_all(&dev_priv->pending_flip_queue);
2513 
2514 	/*
2515 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2516 	 * reset state is cleared.
2517 	 */
2518 	if (reset_completed)
2519 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2520 }
2521 
2522 /**
2523  * i915_reset_and_wakeup - do process context error handling work
2524  * @dev: drm device
2525  *
2526  * Fire an error uevent so userspace can see that a hang or error
2527  * was detected.
2528  */
2529 static void i915_reset_and_wakeup(struct drm_device *dev)
2530 {
2531 	struct drm_i915_private *dev_priv = to_i915(dev);
2532 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2533 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2534 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2535 	int ret;
2536 
2537 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2538 
2539 	/*
2540 	 * Note that there's only one work item which does gpu resets, so we
2541 	 * need not worry about concurrent gpu resets potentially incrementing
2542 	 * error->reset_counter twice. We only need to take care of another
2543 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2544 	 * quick check for that is good enough: schedule_work ensures the
2545 	 * correct ordering between hang detection and this work item, and since
2546 	 * the reset in-progress bit is only ever set by code outside of this
2547 	 * work we don't need to worry about any other races.
2548 	 */
2549 	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2550 		DRM_DEBUG_DRIVER("resetting chip\n");
2551 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2552 				   reset_event);
2553 
2554 		/*
2555 		 * In most cases it's guaranteed that we get here with an RPM
2556 		 * reference held, for example because there is a pending GPU
2557 		 * request that won't finish until the reset is done. This
2558 		 * isn't the case at least when we get here by doing a
2559 		 * simulated reset via debugfs, so get an RPM reference.
2560 		 */
2561 		intel_runtime_pm_get(dev_priv);
2562 
2563 		intel_prepare_reset(dev);
2564 
2565 		/*
2566 		 * All state reset _must_ be completed before we update the
2567 		 * reset counter, for otherwise waiters might miss the reset
2568 		 * pending state and not properly drop locks, resulting in
2569 		 * deadlocks with the reset work.
2570 		 */
2571 		ret = i915_reset(dev);
2572 
2573 		intel_finish_reset(dev);
2574 
2575 		intel_runtime_pm_put(dev_priv);
2576 
2577 		if (ret == 0)
2578 			kobject_uevent_env(&dev->primary->kdev->kobj,
2579 					   KOBJ_CHANGE, reset_done_event);
2580 
2581 		/*
2582 		 * Note: The wake_up also serves as a memory barrier so that
2583 		 * waiters see the updated value of the reset counter atomic_t.
2584 		 */
2585 		i915_error_wake_up(dev_priv, true);
2586 	}
2587 }
2588 
2589 static void i915_report_and_clear_eir(struct drm_device *dev)
2590 {
2591 	struct drm_i915_private *dev_priv = dev->dev_private;
2592 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2593 	u32 eir = I915_READ(EIR);
2594 	int pipe, i;
2595 
2596 	if (!eir)
2597 		return;
2598 
2599 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2600 
2601 	i915_get_extra_instdone(dev, instdone);
2602 
2603 	if (IS_G4X(dev)) {
2604 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2605 			u32 ipeir = I915_READ(IPEIR_I965);
2606 
2607 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2608 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2609 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2610 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2611 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2612 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2613 			I915_WRITE(IPEIR_I965, ipeir);
2614 			POSTING_READ(IPEIR_I965);
2615 		}
2616 		if (eir & GM45_ERROR_PAGE_TABLE) {
2617 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2618 			pr_err("page table error\n");
2619 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2620 			I915_WRITE(PGTBL_ER, pgtbl_err);
2621 			POSTING_READ(PGTBL_ER);
2622 		}
2623 	}
2624 
2625 	if (!IS_GEN2(dev)) {
2626 		if (eir & I915_ERROR_PAGE_TABLE) {
2627 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2628 			pr_err("page table error\n");
2629 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2630 			I915_WRITE(PGTBL_ER, pgtbl_err);
2631 			POSTING_READ(PGTBL_ER);
2632 		}
2633 	}
2634 
2635 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2636 		pr_err("memory refresh error:\n");
2637 		for_each_pipe(dev_priv, pipe)
2638 			pr_err("pipe %c stat: 0x%08x\n",
2639 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2640 		/* pipestat has already been acked */
2641 	}
2642 	if (eir & I915_ERROR_INSTRUCTION) {
2643 		pr_err("instruction error\n");
2644 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2645 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2646 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2647 		if (INTEL_INFO(dev)->gen < 4) {
2648 			u32 ipeir = I915_READ(IPEIR);
2649 
2650 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2651 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2652 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2653 			I915_WRITE(IPEIR, ipeir);
2654 			POSTING_READ(IPEIR);
2655 		} else {
2656 			u32 ipeir = I915_READ(IPEIR_I965);
2657 
2658 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2659 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2660 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2661 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2662 			I915_WRITE(IPEIR_I965, ipeir);
2663 			POSTING_READ(IPEIR_I965);
2664 		}
2665 	}
2666 
2667 	I915_WRITE(EIR, eir);
2668 	POSTING_READ(EIR);
2669 	eir = I915_READ(EIR);
2670 	if (eir) {
2671 		/*
2672 		 * some errors might have become stuck,
2673 		 * mask them.
2674 		 */
2675 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2676 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2677 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2678 	}
2679 }
2680 
2681 /**
2682  * i915_handle_error - handle a gpu error
2683  * @dev: drm device
2684  * @engine_mask: mask representing engines that are hung
 * @fmt: printf-style format string for the error message
2685  * Do some basic checking of register state at error time and
2686  * dump it to the syslog.  Also call i915_capture_error_state() to make
2687  * sure we get a record and make it available in debugfs.  Fire a uevent
2688  * so userspace knows something bad happened (should trigger collection
2689  * of a ring dump etc.).
2690  */
2691 void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2692 		       const char *fmt, ...)
2693 {
2694 	struct drm_i915_private *dev_priv = dev->dev_private;
2695 #if 0
2696 	va_list args;
2697 	char error_msg[80];
2698 
2699 	va_start(args, fmt);
2700 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2701 	va_end(args);
2702 
2703 	i915_capture_error_state(dev, engine_mask, error_msg);
2704 #endif
2705 	i915_report_and_clear_eir(dev);
2706 
2707 	if (engine_mask) {
2708 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2709 				&dev_priv->gpu_error.reset_counter);
2710 
2711 		/*
2712 		 * Wakeup waiting processes so that the reset function
2713 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2714 		 * various locks. By bumping the reset counter first, the woken
2715 		 * processes will see a reset in progress and back off,
2716 		 * releasing their locks and then wait for the reset completion.
2717 		 * We must do this for _all_ gpu waiters that might hold locks
2718 		 * that the reset work needs to acquire.
2719 		 *
2720 		 * Note: The wake_up serves as the required memory barrier to
2721 		 * ensure that the waiters see the updated value of the reset
2722 		 * counter atomic_t.
2723 		 */
2724 		i915_error_wake_up(dev_priv, false);
2725 	}
2726 
2727 	i915_reset_and_wakeup(dev);
2728 }
2729 
2730 /* Called from drm generic code, passed 'crtc' which
2731  * we use as a pipe index
2732  */
2733 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2734 {
2735 	struct drm_i915_private *dev_priv = dev->dev_private;
2736 	unsigned long irqflags;
2737 
2738 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2739 	if (INTEL_INFO(dev)->gen >= 4)
2740 		i915_enable_pipestat(dev_priv, pipe,
2741 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2742 	else
2743 		i915_enable_pipestat(dev_priv, pipe,
2744 				     PIPE_VBLANK_INTERRUPT_STATUS);
2745 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2746 
2747 	return 0;
2748 }
2749 
2750 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2751 {
2752 	struct drm_i915_private *dev_priv = dev->dev_private;
2753 	unsigned long irqflags;
2754 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2755 						     DE_PIPE_VBLANK(pipe);
2756 
2757 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2758 	ilk_enable_display_irq(dev_priv, bit);
2759 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2760 
2761 	return 0;
2762 }
2763 
2764 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2765 {
2766 	struct drm_i915_private *dev_priv = dev->dev_private;
2767 	unsigned long irqflags;
2768 
2769 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2770 	i915_enable_pipestat(dev_priv, pipe,
2771 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2772 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2773 
2774 	return 0;
2775 }
2776 
2777 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2778 {
2779 	struct drm_i915_private *dev_priv = dev->dev_private;
2780 	unsigned long irqflags;
2781 
2782 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2783 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2784 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2785 
2786 	return 0;
2787 }
2788 
2789 /* Called from drm generic code, passed 'crtc' which
2790  * we use as a pipe index
2791  */
2792 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2793 {
2794 	struct drm_i915_private *dev_priv = dev->dev_private;
2795 	unsigned long irqflags;
2796 
2797 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2798 	i915_disable_pipestat(dev_priv, pipe,
2799 			      PIPE_VBLANK_INTERRUPT_STATUS |
2800 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2801 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2802 }
2803 
2804 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2805 {
2806 	struct drm_i915_private *dev_priv = dev->dev_private;
2807 	unsigned long irqflags;
2808 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2809 						     DE_PIPE_VBLANK(pipe);
2810 
2811 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2812 	ilk_disable_display_irq(dev_priv, bit);
2813 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2814 }
2815 
2816 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2817 {
2818 	struct drm_i915_private *dev_priv = dev->dev_private;
2819 	unsigned long irqflags;
2820 
2821 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2822 	i915_disable_pipestat(dev_priv, pipe,
2823 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2824 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2825 }
2826 
2827 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2828 {
2829 	struct drm_i915_private *dev_priv = dev->dev_private;
2830 	unsigned long irqflags;
2831 
2832 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2833 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2834 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2835 }
2836 
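/*
 * An engine is considered idle when its current seqno has caught up with the
 * last seqno that was submitted to it.
 */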
2837 static bool
2838 ring_idle(struct intel_engine_cs *engine, u32 seqno)
2839 {
2840 	return i915_seqno_passed(seqno,
2841 				 READ_ONCE(engine->last_submitted_seqno));
2842 }
2843 
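/*
 * Check whether the command recorded in IPEHR is a semaphore wait; the
 * encoding to test differs between gen8+ and earlier generations.
 */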
2844 static bool
2845 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2846 {
2847 	if (INTEL_INFO(dev)->gen >= 8) {
2848 		return (ipehr >> 23) == 0x1c;
2849 	} else {
2850 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2851 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2852 				 MI_SEMAPHORE_REGISTER);
2853 	}
2854 }
2855 
2856 static struct intel_engine_cs *
2857 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2858 				 u64 offset)
2859 {
2860 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
2861 	struct intel_engine_cs *signaller;
2862 
2863 	if (INTEL_INFO(dev_priv)->gen >= 8) {
2864 		for_each_engine(signaller, dev_priv) {
2865 			if (engine == signaller)
2866 				continue;
2867 
2868 			if (offset == signaller->semaphore.signal_ggtt[engine->id])
2869 				return signaller;
2870 		}
2871 	} else {
2872 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2873 
2874 		for_each_engine(signaller, dev_priv) {
2875 			if (engine == signaller)
2876 				continue;
2877 
2878 			if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2879 				return signaller;
2880 		}
2881 	}
2882 
2883 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2884 		  engine->id, ipehr, offset);
2885 
2886 	return NULL;
2887 }
2888 
2889 static struct intel_engine_cs *
2890 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2891 {
2892 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
2893 	u32 cmd, ipehr, head;
2894 	u64 offset = 0;
2895 	int i, backwards;
2896 
2897 	/*
2898 	 * This function does not support execlist mode - any attempt to
2899 	 * proceed further into this function will result in a kernel panic
2900 	 * when dereferencing ring->buffer, which is not set up in execlist
2901 	 * mode.
2902 	 *
2903 	 * The correct way of doing it would be to derive the currently
2904 	 * executing ring buffer from the current context, which is derived
2905 	 * from the currently running request. Unfortunately, to get the
2906 	 * current request we would have to grab the struct_mutex before doing
2907 	 * anything else, which would be ill-advised since some other thread
2908 	 * might have grabbed it already and managed to hang itself, causing
2909 	 * the hang checker to deadlock.
2910 	 *
2911 	 * Therefore, this function does not support execlist mode in its
2912 	 * current form. Just return NULL and move on.
2913 	 */
2914 	if (engine->buffer == NULL)
2915 		return NULL;
2916 
2917 	ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2918 	if (!ipehr_is_semaphore_wait(engine->dev, ipehr))
2919 		return NULL;
2920 
2921 	/*
2922 	 * HEAD is likely pointing to the dword after the actual command,
2923 	 * so scan backwards until we find the MBOX. But limit it to just 3
2924 	 * or 4 dwords depending on the semaphore wait command size.
2925 	 * Note that we don't care about ACTHD here since that might
2926 	 * point at a batch, and semaphores are always emitted into the
2927 	 * ringbuffer itself.
2928 	 */
2929 	head = I915_READ_HEAD(engine) & HEAD_ADDR;
2930 	backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4;
2931 
2932 	for (i = backwards; i; --i) {
2933 		/*
2934 		 * Be paranoid and presume the hw has gone off into the wild -
2935 		 * our ring is smaller than what the hardware (and hence
2936 		 * HEAD_ADDR) allows. Also handles wrap-around.
2937 		 */
2938 		head &= engine->buffer->size - 1;
2939 
2940 		/* This here seems to blow up */
2941 		cmd = ioread32(engine->buffer->virtual_start + head);
2942 		if (cmd == ipehr)
2943 			break;
2944 
2945 		head -= 4;
2946 	}
2947 
2948 	if (!i)
2949 		return NULL;
2950 
2951 	*seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2952 	if (INTEL_INFO(engine->dev)->gen >= 8) {
2953 		offset = ioread32(engine->buffer->virtual_start + head + 12);
2954 		offset <<= 32;
2955 		offset |= ioread32(engine->buffer->virtual_start + head + 8);
2956 	}
2957 	return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2958 }
2959 
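/*
 * Returns 1 if the semaphore this engine is waiting on has already been
 * signalled, 0 if the signaller is still making progress, and -1 if no
 * signaller could be determined or a potential deadlock was detected.
 */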
2960 static int semaphore_passed(struct intel_engine_cs *engine)
2961 {
2962 	struct drm_i915_private *dev_priv = engine->dev->dev_private;
2963 	struct intel_engine_cs *signaller;
2964 	u32 seqno;
2965 
2966 	engine->hangcheck.deadlock++;
2967 
2968 	signaller = semaphore_waits_for(engine, &seqno);
2969 	if (signaller == NULL)
2970 		return -1;
2971 
2972 	/* Prevent pathological recursion due to driver bugs */
2973 	if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2974 		return -1;
2975 
2976 	if (i915_seqno_passed(signaller->get_seqno(signaller), seqno))
2977 		return 1;
2978 
2979 	/* cursory check for an unkickable deadlock */
2980 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2981 	    semaphore_passed(signaller) < 0)
2982 		return -1;
2983 
2984 	return 0;
2985 }
2986 
2987 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2988 {
2989 	struct intel_engine_cs *engine;
2990 
2991 	for_each_engine(engine, dev_priv)
2992 		engine->hangcheck.deadlock = 0;
2993 }
2994 
2995 static bool subunits_stuck(struct intel_engine_cs *engine)
2996 {
2997 	u32 instdone[I915_NUM_INSTDONE_REG];
2998 	bool stuck;
2999 	int i;
3000 
3001 	if (engine->id != RCS)
3002 		return true;
3003 
3004 	i915_get_extra_instdone(engine->dev, instdone);
3005 
3006 	/* There might be unstable subunit states even when
3007 	 * actual head is not moving. Filter out the unstable ones by
3008 	 * accumulating the undone -> done transitions and only
3009 	 * consider those as progress.
3010 	 */
3011 	stuck = true;
3012 	for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
3013 		const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
3014 
3015 		if (tmp != engine->hangcheck.instdone[i])
3016 			stuck = false;
3017 
3018 		engine->hangcheck.instdone[i] |= tmp;
3019 	}
3020 
3021 	return stuck;
3022 }
3023 
3024 static enum intel_ring_hangcheck_action
3025 head_stuck(struct intel_engine_cs *engine, u64 acthd)
3026 {
3027 	if (acthd != engine->hangcheck.acthd) {
3028 
3029 		/* Clear subunit states on head movement */
3030 		memset(engine->hangcheck.instdone, 0,
3031 		       sizeof(engine->hangcheck.instdone));
3032 
3033 		return HANGCHECK_ACTIVE;
3034 	}
3035 
3036 	if (!subunits_stuck(engine))
3037 		return HANGCHECK_ACTIVE;
3038 
3039 	return HANGCHECK_HUNG;
3040 }
3041 
3042 static enum intel_ring_hangcheck_action
3043 ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3044 {
3045 	struct drm_device *dev = engine->dev;
3046 	struct drm_i915_private *dev_priv = dev->dev_private;
3047 	enum intel_ring_hangcheck_action ha;
3048 	u32 tmp;
3049 
3050 	ha = head_stuck(engine, acthd);
3051 	if (ha != HANGCHECK_HUNG)
3052 		return ha;
3053 
3054 	if (IS_GEN2(dev))
3055 		return HANGCHECK_HUNG;
3056 
3057 	/* Is the chip hanging on a WAIT_FOR_EVENT?
3058 	 * If so we can simply poke the RB_WAIT bit
3059 	 * and break the hang. This should work on
3060 	 * all but the second generation chipsets.
3061 	 */
3062 	tmp = I915_READ_CTL(engine);
3063 	if (tmp & RING_WAIT) {
3064 		i915_handle_error(dev, 0,
3065 				  "Kicking stuck wait on %s",
3066 				  engine->name);
3067 		I915_WRITE_CTL(engine, tmp);
3068 		return HANGCHECK_KICK;
3069 	}
3070 
3071 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3072 		switch (semaphore_passed(engine)) {
3073 		default:
3074 			return HANGCHECK_HUNG;
3075 		case 1:
3076 			i915_handle_error(dev, 0,
3077 					  "Kicking stuck semaphore on %s",
3078 					  engine->name);
3079 			I915_WRITE_CTL(engine, tmp);
3080 			return HANGCHECK_KICK;
3081 		case 0:
3082 			return HANGCHECK_WAIT;
3083 		}
3084 	}
3085 
3086 	return HANGCHECK_HUNG;
3087 }
3088 
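/*
 * Called for an idle engine that still has waiters: if no new user interrupts
 * have been seen since the last hangcheck, assume an interrupt was missed and
 * wake the waiters so they re-check the seqno themselves.
 */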
3089 static unsigned kick_waiters(struct intel_engine_cs *engine)
3090 {
3091 	struct drm_i915_private *i915 = to_i915(engine->dev);
3092 	unsigned user_interrupts = READ_ONCE(engine->user_interrupts);
3093 
3094 	if (engine->hangcheck.user_interrupts == user_interrupts &&
3095 	    !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3096 		if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine)))
3097 			DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3098 				  engine->name);
3099 		else
3100 			DRM_INFO("Fake missed irq on %s\n",
3101 				 engine->name);
3102 		wake_up_all(&engine->irq_queue);
3103 	}
3104 
3105 	return user_interrupts;
3106 }
3107 /*
3108  * This is called when the chip hasn't reported back with completed
3109  * batchbuffers in a long time. We keep track of per-ring seqno progress and
3110  * if there is no progress, the hangcheck score for that ring is increased.
3111  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
3112  * we kick the ring. If we see no progress on three subsequent calls
3113  * we assume the chip is wedged and try to fix it by resetting the chip.
3114  */
3115 static void i915_hangcheck_elapsed(struct work_struct *work)
3116 {
3117 	struct drm_i915_private *dev_priv =
3118 		container_of(work, typeof(*dev_priv),
3119 			     gpu_error.hangcheck_work.work);
3120 	struct drm_device *dev = dev_priv->dev;
3121 	struct intel_engine_cs *engine;
3122 	enum intel_engine_id id;
3123 	int busy_count = 0, rings_hung = 0;
3124 	bool stuck[I915_NUM_ENGINES] = { 0 };
3125 #define BUSY 1
3126 #define KICK 5
3127 #define HUNG 20
3128 #define ACTIVE_DECAY 15
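	/*
	 * Score adjustments applied below: a busy ring that makes no progress
	 * gains BUSY/KICK/HUNG points depending on how stuck it appears, while
	 * forward progress decays the score by ACTIVE_DECAY. A ring whose score
	 * reaches HANGCHECK_SCORE_RING_HUNG is reported as hung.
	 */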
3129 
3130 	if (!i915.enable_hangcheck)
3131 		return;
3132 
3133 	/*
3134 	 * The hangcheck work is synced during runtime suspend, we don't
3135 	 * require a wakeref. TODO: instead of disabling the asserts make
3136 	 * sure that we hold a reference when this work is running.
3137 	 */
3138 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3139 
3140 	/* As enabling the GPU requires fairly extensive mmio access,
3141 	 * periodically arm the mmio checker to see if we are triggering
3142 	 * any invalid access.
3143 	 */
3144 	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3145 
3146 	for_each_engine_id(engine, dev_priv, id) {
3147 		u64 acthd;
3148 		u32 seqno;
3149 		unsigned user_interrupts;
3150 		bool busy = true;
3151 
3152 		semaphore_clear_deadlocks(dev_priv);
3153 
3154 		/* We don't strictly need an irq-barrier here, as we are not
3155 		 * serving an interrupt request, but be paranoid in case the
3156 		 * barrier has side-effects (such as preventing a broken
3157 		 * cacheline snoop) and so be sure that we can see the seqno
3158 		 * advance. If the seqno should stick, due to a stale
3159 		 * cacheline, we would erroneously declare the GPU hung.
3160 		 */
3161 		if (engine->irq_seqno_barrier)
3162 			engine->irq_seqno_barrier(engine);
3163 
3164 		acthd = intel_ring_get_active_head(engine);
3165 		seqno = engine->get_seqno(engine);
3166 
3167 		/* Reset stuck interrupts between batch advances */
3168 		user_interrupts = 0;
3169 
3170 		if (engine->hangcheck.seqno == seqno) {
3171 			if (ring_idle(engine, seqno)) {
3172 				engine->hangcheck.action = HANGCHECK_IDLE;
3173 				if (waitqueue_active(&engine->irq_queue)) {
3174 					/* Safeguard against driver failure */
3175 					user_interrupts = kick_waiters(engine);
3176 					engine->hangcheck.score += BUSY;
3177 				} else
3178 					busy = false;
3179 			} else {
3180 				/* We always increment the hangcheck score
3181 				 * if the ring is busy and still processing
3182 				 * the same request, so that no single request
3183 				 * can run indefinitely (such as a chain of
3184 				 * batches). The only time we do not increment
3185 				 * the hangcheck score on this ring is if this
3186 				 * ring is in a legitimate wait for another
3187 				 * ring. In that case the waiting ring is a
3188 				 * victim and we want to be sure we catch the
3189 				 * right culprit. Then every time we do kick
3190 				 * the ring, add a small increment to the
3191 				 * score so that we can catch a batch that is
3192 				 * being repeatedly kicked and so responsible
3193 				 * for stalling the machine.
3194 				 */
3195 				engine->hangcheck.action = ring_stuck(engine,
3196 								      acthd);
3197 
3198 				switch (engine->hangcheck.action) {
3199 				case HANGCHECK_IDLE:
3200 				case HANGCHECK_WAIT:
3201 					break;
3202 				case HANGCHECK_ACTIVE:
3203 					engine->hangcheck.score += BUSY;
3204 					break;
3205 				case HANGCHECK_KICK:
3206 					engine->hangcheck.score += KICK;
3207 					break;
3208 				case HANGCHECK_HUNG:
3209 					engine->hangcheck.score += HUNG;
3210 					stuck[id] = true;
3211 					break;
3212 				}
3213 			}
3214 		} else {
3215 			engine->hangcheck.action = HANGCHECK_ACTIVE;
3216 
3217 			/* Gradually reduce the count so that we catch DoS
3218 			 * attempts across multiple batches.
3219 			 */
3220 			if (engine->hangcheck.score > 0)
3221 				engine->hangcheck.score -= ACTIVE_DECAY;
3222 			if (engine->hangcheck.score < 0)
3223 				engine->hangcheck.score = 0;
3224 
3225 			/* Clear head and subunit states on seqno movement */
3226 			acthd = 0;
3227 
3228 			memset(engine->hangcheck.instdone, 0,
3229 			       sizeof(engine->hangcheck.instdone));
3230 		}
3231 
3232 		engine->hangcheck.seqno = seqno;
3233 		engine->hangcheck.acthd = acthd;
3234 		engine->hangcheck.user_interrupts = user_interrupts;
3235 		busy_count += busy;
3236 	}
3237 
3238 	for_each_engine_id(engine, dev_priv, id) {
3239 		if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3240 			DRM_INFO("%s on %s\n",
3241 				 stuck[id] ? "stuck" : "no progress",
3242 				 engine->name);
3243 			rings_hung |= intel_engine_flag(engine);
3244 		}
3245 	}
3246 
3247 	if (rings_hung) {
3248 		i915_handle_error(dev, rings_hung, "Engine(s) hung");
3249 		goto out;
3250 	}
3251 
3252 	if (busy_count)
3253 		/* Reset the timer in case the chip hangs without another request
3254 		 * being added */
3255 		i915_queue_hangcheck(dev);
3256 
3257 out:
3258 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3259 }
3260 
3261 void i915_queue_hangcheck(struct drm_device *dev)
3262 {
3263 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3264 
3265 	if (!i915.enable_hangcheck)
3266 		return;
3267 
3268 	/* Don't continually defer the hangcheck so that it is always run at
3269 	 * least once after work has been scheduled on any ring. Otherwise,
3270 	 * we will ignore a hung ring if a second ring is kept busy.
3271 	 */
3272 
3273 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3274 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3275 }
3276 
3277 static void ibx_irq_reset(struct drm_device *dev)
3278 {
3279 	struct drm_i915_private *dev_priv = dev->dev_private;
3280 
3281 	if (HAS_PCH_NOP(dev))
3282 		return;
3283 
3284 	GEN5_IRQ_RESET(SDE);
3285 
3286 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3287 		I915_WRITE(SERR_INT, 0xffffffff);
3288 }
3289 
3290 /*
3291  * SDEIER is also touched by the interrupt handler to work around missed PCH
3292  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3293  * instead we unconditionally enable all PCH interrupt sources here, but then
3294  * only unmask them as needed with SDEIMR.
3295  *
3296  * This function needs to be called before interrupts are enabled.
3297  */
3298 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3299 {
3300 	struct drm_i915_private *dev_priv = dev->dev_private;
3301 
3302 	if (HAS_PCH_NOP(dev))
3303 		return;
3304 
3305 	WARN_ON(I915_READ(SDEIER) != 0);
3306 	I915_WRITE(SDEIER, 0xffffffff);
3307 	POSTING_READ(SDEIER);
3308 }
3309 
3310 static void gen5_gt_irq_reset(struct drm_device *dev)
3311 {
3312 	struct drm_i915_private *dev_priv = dev->dev_private;
3313 
3314 	GEN5_IRQ_RESET(GT);
3315 	if (INTEL_INFO(dev)->gen >= 6)
3316 		GEN5_IRQ_RESET(GEN6_PM);
3317 }
3318 
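/*
 * Quiesce the VLV/CHV display interrupts: ack DPINVGTT and the hotplug status,
 * clear any pending PIPESTAT bits, and reset the VLV interrupt registers along
 * with the cached irq_mask.
 */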
3319 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3320 {
3321 	enum i915_pipe pipe;
3322 
3323 	if (IS_CHERRYVIEW(dev_priv))
3324 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3325 	else
3326 		I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3327 
3328 	i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3329 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3330 
3331 	for_each_pipe(dev_priv, pipe) {
3332 		I915_WRITE(PIPESTAT(pipe),
3333 			   PIPE_FIFO_UNDERRUN_STATUS |
3334 			   PIPESTAT_INT_STATUS_MASK);
3335 		dev_priv->pipestat_irq_mask[pipe] = 0;
3336 	}
3337 
3338 	GEN5_IRQ_RESET(VLV_);
3339 	dev_priv->irq_mask = ~0;
3340 }
3341 
3342 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3343 {
3344 	u32 pipestat_mask;
3345 	u32 enable_mask;
3346 	enum i915_pipe pipe;
3347 
3348 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3349 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3350 
3351 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3352 	for_each_pipe(dev_priv, pipe)
3353 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3354 
3355 	enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3356 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3357 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3358 	if (IS_CHERRYVIEW(dev_priv))
3359 		enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3360 
3361 	WARN_ON(dev_priv->irq_mask != ~0);
3362 
3363 	dev_priv->irq_mask = ~enable_mask;
3364 
3365 	GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3366 }
3367 
3368 /* drm_dma.h hooks
3369 */
3370 static void ironlake_irq_reset(struct drm_device *dev)
3371 {
3372 	struct drm_i915_private *dev_priv = dev->dev_private;
3373 
3374 	I915_WRITE(HWSTAM, 0xffffffff);
3375 
3376 	GEN5_IRQ_RESET(DE);
3377 	if (IS_GEN7(dev))
3378 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3379 
3380 	gen5_gt_irq_reset(dev);
3381 
3382 	ibx_irq_reset(dev);
3383 }
3384 
3385 static void valleyview_irq_preinstall(struct drm_device *dev)
3386 {
3387 	struct drm_i915_private *dev_priv = dev->dev_private;
3388 
3389 	I915_WRITE(VLV_MASTER_IER, 0);
3390 	POSTING_READ(VLV_MASTER_IER);
3391 
3392 	gen5_gt_irq_reset(dev);
3393 
3394 	spin_lock_irq(&dev_priv->irq_lock);
3395 	if (dev_priv->display_irqs_enabled)
3396 		vlv_display_irq_reset(dev_priv);
3397 	spin_unlock_irq(&dev_priv->irq_lock);
3398 }
3399 
3400 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3401 {
3402 	GEN8_IRQ_RESET_NDX(GT, 0);
3403 	GEN8_IRQ_RESET_NDX(GT, 1);
3404 	GEN8_IRQ_RESET_NDX(GT, 2);
3405 	GEN8_IRQ_RESET_NDX(GT, 3);
3406 }
3407 
3408 static void gen8_irq_reset(struct drm_device *dev)
3409 {
3410 	struct drm_i915_private *dev_priv = dev->dev_private;
3411 	int pipe;
3412 
3413 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3414 	POSTING_READ(GEN8_MASTER_IRQ);
3415 
3416 	gen8_gt_irq_reset(dev_priv);
3417 
3418 	for_each_pipe(dev_priv, pipe)
3419 		if (intel_display_power_is_enabled(dev_priv,
3420 						   POWER_DOMAIN_PIPE(pipe)))
3421 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3422 
3423 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3424 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3425 	GEN5_IRQ_RESET(GEN8_PCU_);
3426 
3427 	if (HAS_PCH_SPLIT(dev))
3428 		ibx_irq_reset(dev);
3429 }
3430 
3431 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3432 				     unsigned int pipe_mask)
3433 {
3434 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3435 	enum i915_pipe pipe;
3436 
3437 	spin_lock_irq(&dev_priv->irq_lock);
3438 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3439 		GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3440 				  dev_priv->de_irq_mask[pipe],
3441 				  ~dev_priv->de_irq_mask[pipe] | extra_ier);
3442 	spin_unlock_irq(&dev_priv->irq_lock);
3443 }
3444 
3445 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3446 				     unsigned int pipe_mask)
3447 {
3448 	enum i915_pipe pipe;
3449 
3450 	spin_lock_irq(&dev_priv->irq_lock);
3451 	for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3452 		GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3453 	spin_unlock_irq(&dev_priv->irq_lock);
3454 
3455 	/* make sure we're done processing display irqs */
3456 	synchronize_irq(dev_priv->dev->irq);
3457 }
3458 
3459 static void cherryview_irq_preinstall(struct drm_device *dev)
3460 {
3461 	struct drm_i915_private *dev_priv = dev->dev_private;
3462 
3463 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3464 	POSTING_READ(GEN8_MASTER_IRQ);
3465 
3466 	gen8_gt_irq_reset(dev_priv);
3467 
3468 	GEN5_IRQ_RESET(GEN8_PCU_);
3469 
3470 	spin_lock_irq(&dev_priv->irq_lock);
3471 	if (dev_priv->display_irqs_enabled)
3472 		vlv_display_irq_reset(dev_priv);
3473 	spin_unlock_irq(&dev_priv->irq_lock);
3474 }
3475 
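/*
 * Collect the hotplug interrupt bits for all encoders whose HPD pin is
 * currently marked HPD_ENABLED, using the given per-platform pin-to-bit table.
 */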
3476 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3477 				  const u32 hpd[HPD_NUM_PINS])
3478 {
3479 	struct drm_i915_private *dev_priv = to_i915(dev);
3480 	struct intel_encoder *encoder;
3481 	u32 enabled_irqs = 0;
3482 
3483 	for_each_intel_encoder(dev, encoder)
3484 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3485 			enabled_irqs |= hpd[encoder->hpd_pin];
3486 
3487 	return enabled_irqs;
3488 }
3489 
3490 static void ibx_hpd_irq_setup(struct drm_device *dev)
3491 {
3492 	struct drm_i915_private *dev_priv = dev->dev_private;
3493 	u32 hotplug_irqs, hotplug, enabled_irqs;
3494 
3495 	if (HAS_PCH_IBX(dev)) {
3496 		hotplug_irqs = SDE_HOTPLUG_MASK;
3497 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3498 	} else {
3499 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3500 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3501 	}
3502 
3503 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3504 
3505 	/*
3506 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3507 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3508 	 * The pulse duration bits are reserved on LPT+.
3509 	 */
3510 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3511 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3512 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3513 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3514 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3515 	/*
3516 	 * When the CPU and PCH are on the same package, port A
3517 	 * HPD must be enabled in both north and south.
3518 	 */
3519 	if (HAS_PCH_LPT_LP(dev))
3520 		hotplug |= PORTA_HOTPLUG_ENABLE;
3521 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3522 }
3523 
3524 static void spt_hpd_irq_setup(struct drm_device *dev)
3525 {
3526 	struct drm_i915_private *dev_priv = dev->dev_private;
3527 	u32 hotplug_irqs, hotplug, enabled_irqs;
3528 
3529 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3530 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3531 
3532 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3533 
3534 	/* Enable digital hotplug on the PCH */
3535 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3536 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3537 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3538 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3539 
3540 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3541 	hotplug |= PORTE_HOTPLUG_ENABLE;
3542 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3543 }
3544 
3545 static void ilk_hpd_irq_setup(struct drm_device *dev)
3546 {
3547 	struct drm_i915_private *dev_priv = dev->dev_private;
3548 	u32 hotplug_irqs, hotplug, enabled_irqs;
3549 
3550 	if (INTEL_INFO(dev)->gen >= 8) {
3551 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3552 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3553 
3554 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3555 	} else if (INTEL_INFO(dev)->gen >= 7) {
3556 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3557 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3558 
3559 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3560 	} else {
3561 		hotplug_irqs = DE_DP_A_HOTPLUG;
3562 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3563 
3564 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3565 	}
3566 
3567 	/*
3568 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3569 	 * duration to 2ms (which is the minimum in the DisplayPort spec).
3570 	 * The pulse duration bits are reserved on HSW+.
3571 	 */
3572 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3573 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3574 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3575 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3576 
3577 	ibx_hpd_irq_setup(dev);
3578 }
3579 
3580 static void bxt_hpd_irq_setup(struct drm_device *dev)
3581 {
3582 	struct drm_i915_private *dev_priv = dev->dev_private;
3583 	u32 hotplug_irqs, hotplug, enabled_irqs;
3584 
3585 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3586 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3587 
3588 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3589 
3590 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3591 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3592 		PORTA_HOTPLUG_ENABLE;
3593 
3594 	DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3595 		      hotplug, enabled_irqs);
3596 	hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3597 
3598 	/*
3599 	 * For BXT the invert bit has to be set based on the AOB design
3600 	 * used for the HPD detection logic, so update it from the VBT fields.
3601 	 */
3602 
3603 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3604 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3605 		hotplug |= BXT_DDIA_HPD_INVERT;
3606 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3607 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3608 		hotplug |= BXT_DDIB_HPD_INVERT;
3609 	if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3610 	    intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3611 		hotplug |= BXT_DDIC_HPD_INVERT;
3612 
3613 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3614 }
3615 
3616 static void ibx_irq_postinstall(struct drm_device *dev)
3617 {
3618 	struct drm_i915_private *dev_priv = dev->dev_private;
3619 	u32 mask;
3620 
3621 	if (HAS_PCH_NOP(dev))
3622 		return;
3623 
3624 	if (HAS_PCH_IBX(dev))
3625 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3626 	else
3627 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3628 
3629 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3630 	I915_WRITE(SDEIMR, ~mask);
3631 }
3632 
3633 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3634 {
3635 	struct drm_i915_private *dev_priv = dev->dev_private;
3636 	u32 pm_irqs, gt_irqs;
3637 
3638 	pm_irqs = gt_irqs = 0;
3639 
3640 	dev_priv->gt_irq_mask = ~0;
3641 	if (HAS_L3_DPF(dev)) {
3642 		/* L3 parity interrupt is always unmasked. */
3643 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3644 		gt_irqs |= GT_PARITY_ERROR(dev);
3645 	}
3646 
3647 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3648 	if (IS_GEN5(dev)) {
3649 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3650 			   ILK_BSD_USER_INTERRUPT;
3651 	} else {
3652 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3653 	}
3654 
3655 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3656 
3657 	if (INTEL_INFO(dev)->gen >= 6) {
3658 		/*
3659 		 * RPS interrupts will get enabled/disabled on demand when RPS
3660 		 * itself is enabled/disabled.
3661 		 */
3662 		if (HAS_VEBOX(dev))
3663 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3664 
3665 		dev_priv->pm_irq_mask = 0xffffffff;
3666 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3667 	}
3668 }
3669 
3670 static int ironlake_irq_postinstall(struct drm_device *dev)
3671 {
3672 	struct drm_i915_private *dev_priv = dev->dev_private;
3673 	u32 display_mask, extra_mask;
3674 
3675 	if (INTEL_INFO(dev)->gen >= 7) {
3676 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3677 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3678 				DE_PLANEB_FLIP_DONE_IVB |
3679 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3680 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3681 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3682 			      DE_DP_A_HOTPLUG_IVB);
3683 	} else {
3684 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3685 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3686 				DE_AUX_CHANNEL_A |
3687 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3688 				DE_POISON);
3689 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3690 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3691 			      DE_DP_A_HOTPLUG);
3692 	}
3693 
3694 	dev_priv->irq_mask = ~display_mask;
3695 
3696 	I915_WRITE(HWSTAM, 0xeffe);
3697 
3698 	ibx_irq_pre_postinstall(dev);
3699 
3700 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3701 
3702 	gen5_gt_irq_postinstall(dev);
3703 
3704 	ibx_irq_postinstall(dev);
3705 
3706 	if (IS_IRONLAKE_M(dev)) {
3707 		/* Enable PCU event interrupts
3708 		 *
3709 		 * Spinlocking is not required here for correctness since interrupt
3710 		 * setup is guaranteed to run in a single-threaded context. But we
3711 		 * need it to keep assert_spin_locked happy. */
3712 		spin_lock_irq(&dev_priv->irq_lock);
3713 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3714 		spin_unlock_irq(&dev_priv->irq_lock);
3715 	}
3716 
3717 	return 0;
3718 }
3719 
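/**
 * valleyview_enable_display_irqs - enable VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Marks the display interrupts as enabled and, if the device interrupts are
 * already installed, resets and re-installs the display interrupt state.
 * Must be called with dev_priv->irq_lock held.
 */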
3720 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3721 {
3722 	assert_spin_locked(&dev_priv->irq_lock);
3723 
3724 	if (dev_priv->display_irqs_enabled)
3725 		return;
3726 
3727 	dev_priv->display_irqs_enabled = true;
3728 
3729 	if (intel_irqs_enabled(dev_priv)) {
3730 		vlv_display_irq_reset(dev_priv);
3731 		vlv_display_irq_postinstall(dev_priv);
3732 	}
3733 }
3734 
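/**
 * valleyview_disable_display_irqs - disable VLV/CHV display interrupts
 * @dev_priv: i915 device instance
 *
 * Marks the display interrupts as disabled and, if the device interrupts are
 * still installed, resets the display interrupt state.
 * Must be called with dev_priv->irq_lock held.
 */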
3735 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3736 {
3737 	assert_spin_locked(&dev_priv->irq_lock);
3738 
3739 	if (!dev_priv->display_irqs_enabled)
3740 		return;
3741 
3742 	dev_priv->display_irqs_enabled = false;
3743 
3744 	if (intel_irqs_enabled(dev_priv))
3745 		vlv_display_irq_reset(dev_priv);
3746 }
3747 
3748 
3749 static int valleyview_irq_postinstall(struct drm_device *dev)
3750 {
3751 	struct drm_i915_private *dev_priv = dev->dev_private;
3752 
3753 	gen5_gt_irq_postinstall(dev);
3754 
3755 	spin_lock_irq(&dev_priv->irq_lock);
3756 	if (dev_priv->display_irqs_enabled)
3757 		vlv_display_irq_postinstall(dev_priv);
3758 	spin_unlock_irq(&dev_priv->irq_lock);
3759 
3760 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3761 	POSTING_READ(VLV_MASTER_IER);
3762 
3763 	return 0;
3764 }
3765 
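/*
 * Program the four GEN8 GT interrupt register banks: render/blitter (0),
 * the two video decode engines (1), PM/RPS (2, left masked until RPS enables
 * its interrupts on demand) and video enhancement (3).
 */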
3766 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3767 {
3768 	/* These are interrupts we'll toggle with the ring mask register */
3769 	uint32_t gt_interrupts[] = {
3770 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3771 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3772 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3773 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3774 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3775 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3776 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3777 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3778 		0,
3779 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3780 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3781 		};
3782 
3783 	if (HAS_L3_DPF(dev_priv))
3784 		gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3785 
3786 	dev_priv->pm_irq_mask = 0xffffffff;
3787 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3788 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3789 	/*
3790 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3791 	 * is enabled/disabled.
3792 	 */
3793 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3794 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3795 }
3796 
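/*
 * Set up the GEN8+ display engine interrupts: per-pipe flip done, fault,
 * vblank and FIFO underrun bits (only for pipes whose power well is currently
 * enabled) plus the DE port interrupts for AUX channels and, depending on the
 * platform, GMBUS and hotplug.
 */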
3797 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3798 {
3799 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3800 	uint32_t de_pipe_enables;
3801 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3802 	u32 de_port_enables;
3803 	enum i915_pipe pipe;
3804 
3805 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3806 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3807 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3808 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3809 				  GEN9_AUX_CHANNEL_D;
3810 		if (IS_BROXTON(dev_priv))
3811 			de_port_masked |= BXT_DE_PORT_GMBUS;
3812 	} else {
3813 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3814 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3815 	}
3816 
3817 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3818 					   GEN8_PIPE_FIFO_UNDERRUN;
3819 
3820 	de_port_enables = de_port_masked;
3821 	if (IS_BROXTON(dev_priv))
3822 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3823 	else if (IS_BROADWELL(dev_priv))
3824 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3825 
3826 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3827 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3828 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3829 
3830 	for_each_pipe(dev_priv, pipe)
3831 		if (intel_display_power_is_enabled(dev_priv,
3832 				POWER_DOMAIN_PIPE(pipe)))
3833 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3834 					  dev_priv->de_irq_mask[pipe],
3835 					  de_pipe_enables);
3836 
3837 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3838 }
3839 
3840 static int gen8_irq_postinstall(struct drm_device *dev)
3841 {
3842 	struct drm_i915_private *dev_priv = dev->dev_private;
3843 
3844 	if (HAS_PCH_SPLIT(dev))
3845 		ibx_irq_pre_postinstall(dev);
3846 
3847 	gen8_gt_irq_postinstall(dev_priv);
3848 	gen8_de_irq_postinstall(dev_priv);
3849 
3850 	if (HAS_PCH_SPLIT(dev))
3851 		ibx_irq_postinstall(dev);
3852 
3853 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3854 	POSTING_READ(GEN8_MASTER_IRQ);
3855 
3856 	return 0;
3857 }
3858 
3859 static int cherryview_irq_postinstall(struct drm_device *dev)
3860 {
3861 	struct drm_i915_private *dev_priv = dev->dev_private;
3862 
3863 	gen8_gt_irq_postinstall(dev_priv);
3864 
3865 	spin_lock_irq(&dev_priv->irq_lock);
3866 	if (dev_priv->display_irqs_enabled)
3867 		vlv_display_irq_postinstall(dev_priv);
3868 	spin_unlock_irq(&dev_priv->irq_lock);
3869 
3870 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3871 	POSTING_READ(GEN8_MASTER_IRQ);
3872 
3873 	return 0;
3874 }
3875 
3876 static void gen8_irq_uninstall(struct drm_device *dev)
3877 {
3878 	struct drm_i915_private *dev_priv = dev->dev_private;
3879 
3880 	if (!dev_priv)
3881 		return;
3882 
3883 	gen8_irq_reset(dev);
3884 }
3885 
3886 static void valleyview_irq_uninstall(struct drm_device *dev)
3887 {
3888 	struct drm_i915_private *dev_priv = dev->dev_private;
3889 
3890 	if (!dev_priv)
3891 		return;
3892 
3893 	I915_WRITE(VLV_MASTER_IER, 0);
3894 	POSTING_READ(VLV_MASTER_IER);
3895 
3896 	gen5_gt_irq_reset(dev);
3897 
3898 	I915_WRITE(HWSTAM, 0xffffffff);
3899 
3900 	spin_lock_irq(&dev_priv->irq_lock);
3901 	if (dev_priv->display_irqs_enabled)
3902 		vlv_display_irq_reset(dev_priv);
3903 	spin_unlock_irq(&dev_priv->irq_lock);
3904 }
3905 
3906 static void cherryview_irq_uninstall(struct drm_device *dev)
3907 {
3908 	struct drm_i915_private *dev_priv = dev->dev_private;
3909 
3910 	if (!dev_priv)
3911 		return;
3912 
3913 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3914 	POSTING_READ(GEN8_MASTER_IRQ);
3915 
3916 	gen8_gt_irq_reset(dev_priv);
3917 
3918 	GEN5_IRQ_RESET(GEN8_PCU_);
3919 
3920 	spin_lock_irq(&dev_priv->irq_lock);
3921 	if (dev_priv->display_irqs_enabled)
3922 		vlv_display_irq_reset(dev_priv);
3923 	spin_unlock_irq(&dev_priv->irq_lock);
3924 }
3925 
3926 static void ironlake_irq_uninstall(struct drm_device *dev)
3927 {
3928 	struct drm_i915_private *dev_priv = dev->dev_private;
3929 
3930 	if (!dev_priv)
3931 		return;
3932 
3933 	ironlake_irq_reset(dev);
3934 }
3935 
3936 static void i8xx_irq_preinstall(struct drm_device * dev)
3937 {
3938 	struct drm_i915_private *dev_priv = dev->dev_private;
3939 	int pipe;
3940 
3941 	for_each_pipe(dev_priv, pipe)
3942 		I915_WRITE(PIPESTAT(pipe), 0);
3943 	I915_WRITE16(IMR, 0xffff);
3944 	I915_WRITE16(IER, 0x0);
3945 	POSTING_READ16(IER);
3946 }
3947 
3948 static int i8xx_irq_postinstall(struct drm_device *dev)
3949 {
3950 	struct drm_i915_private *dev_priv = dev->dev_private;
3951 
3952 	I915_WRITE16(EMR,
3953 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3954 
3955 	/* Unmask the interrupts that we always want on. */
3956 	dev_priv->irq_mask =
3957 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3958 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3959 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3960 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3961 	I915_WRITE16(IMR, dev_priv->irq_mask);
3962 
3963 	I915_WRITE16(IER,
3964 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3965 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3966 		     I915_USER_INTERRUPT);
3967 	POSTING_READ16(IER);
3968 
3969 	/* Interrupt setup is already guaranteed to be single-threaded; this is
3970 	 * just to make the assert_spin_locked check happy. */
3971 	spin_lock_irq(&dev_priv->irq_lock);
3972 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3973 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3974 	spin_unlock_irq(&dev_priv->irq_lock);
3975 
3976 	return 0;
3977 }
3978 
3979 /*
3980  * Returns true when a page flip has completed.
3981  */
3982 static bool i8xx_handle_vblank(struct drm_device *dev,
3983 			       int plane, int pipe, u32 iir)
3984 {
3985 	struct drm_i915_private *dev_priv = dev->dev_private;
3986 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3987 
3988 	if (!intel_pipe_handle_vblank(dev, pipe))
3989 		return false;
3990 
3991 	if ((iir & flip_pending) == 0)
3992 		goto check_page_flip;
3993 
3994 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3995 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3996 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3997 	 * the flip is completed (no longer pending). Since this doesn't raise
3998 	 * an interrupt per se, we watch for the change at vblank.
3999 	 */
4000 	if (I915_READ16(ISR) & flip_pending)
4001 		goto check_page_flip;
4002 
4003 	intel_prepare_page_flip(dev, plane);
4004 	intel_finish_page_flip(dev, pipe);
4005 	return true;
4006 
4007 check_page_flip:
4008 	intel_check_page_flip(dev, pipe);
4009 	return false;
4010 }
4011 
4012 static irqreturn_t i8xx_irq_handler(void *arg)
4013 {
4014 	struct drm_device *dev = arg;
4015 	struct drm_i915_private *dev_priv = dev->dev_private;
4016 	u16 iir, new_iir;
4017 	u32 pipe_stats[2];
4018 	int pipe;
4019 	u16 flip_mask =
4020 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4021 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4022 	irqreturn_t ret = IRQ_NONE;
4023 	if (!intel_irqs_enabled(dev_priv))
4024 		return IRQ_NONE;
4025 
4026 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4027 	disable_rpm_wakeref_asserts(dev_priv);
4028 
4029 	iir = I915_READ16(IIR);
4030 	if (iir == 0)
4031 		goto out;
4032 
4033 	while (iir & ~flip_mask) {
4034 		/* Can't rely on pipestat interrupt bit in iir as it might
4035 		 * have been cleared after the pipestat interrupt was received.
4036 		 * It doesn't set the bit in iir again, but it still produces
4037 		 * interrupts (for non-MSI).
4038 		 */
4039 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4040 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4041 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4042 
4043 		for_each_pipe(dev_priv, pipe) {
4044 			i915_reg_t reg = PIPESTAT(pipe);
4045 			pipe_stats[pipe] = I915_READ(reg);
4046 
4047 			/*
4048 			 * Clear the PIPE*STAT regs before the IIR
4049 			 */
4050 			if (pipe_stats[pipe] & 0x8000ffff)
4051 				I915_WRITE(reg, pipe_stats[pipe]);
4052 		}
4053 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4054 
4055 		I915_WRITE16(IIR, iir & ~flip_mask);
4056 		new_iir = I915_READ16(IIR); /* Flush posted writes */
4057 
4058 		if (iir & I915_USER_INTERRUPT)
4059 			notify_ring(&dev_priv->engine[RCS]);
4060 
4061 		for_each_pipe(dev_priv, pipe) {
4062 			int plane = pipe;
4063 			if (HAS_FBC(dev))
4064 				plane = !plane;
4065 
4066 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4067 			    i8xx_handle_vblank(dev, plane, pipe, iir))
4068 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4069 
4070 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4071 				i9xx_pipe_crc_irq_handler(dev, pipe);
4072 
4073 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4074 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4075 								    pipe);
4076 		}
4077 
4078 		iir = new_iir;
4079 	}
4080 	ret = IRQ_HANDLED;
4081 out:
4082 	enable_rpm_wakeref_asserts(dev_priv);
4083 	return ret;
4084 }
4085 
4086 static void i8xx_irq_uninstall(struct drm_device * dev)
4087 {
4088 	struct drm_i915_private *dev_priv = dev->dev_private;
4089 	int pipe;
4090 
4091 	for_each_pipe(dev_priv, pipe) {
4092 		/* Clear enable bits; then clear status bits */
4093 		I915_WRITE(PIPESTAT(pipe), 0);
4094 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4095 	}
4096 	I915_WRITE16(IMR, 0xffff);
4097 	I915_WRITE16(IER, 0x0);
4098 	I915_WRITE16(IIR, I915_READ16(IIR));
4099 }
4100 
4101 static void i915_irq_preinstall(struct drm_device * dev)
4102 {
4103 	struct drm_i915_private *dev_priv = dev->dev_private;
4104 	int pipe;
4105 
4106 	if (I915_HAS_HOTPLUG(dev)) {
4107 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4108 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4109 	}
4110 
4111 	I915_WRITE16(HWSTAM, 0xeffe);
4112 	for_each_pipe(dev_priv, pipe)
4113 		I915_WRITE(PIPESTAT(pipe), 0);
4114 	I915_WRITE(IMR, 0xffffffff);
4115 	I915_WRITE(IER, 0x0);
4116 	POSTING_READ(IER);
4117 }
4118 
4119 static int i915_irq_postinstall(struct drm_device *dev)
4120 {
4121 	struct drm_i915_private *dev_priv = dev->dev_private;
4122 	u32 enable_mask;
4123 
4124 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4125 
4126 	/* Unmask the interrupts that we always want on. */
4127 	dev_priv->irq_mask =
4128 		~(I915_ASLE_INTERRUPT |
4129 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4130 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4131 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4132 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4133 
4134 	enable_mask =
4135 		I915_ASLE_INTERRUPT |
4136 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4137 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4138 		I915_USER_INTERRUPT;
4139 
4140 	if (I915_HAS_HOTPLUG(dev)) {
4141 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4142 		POSTING_READ(PORT_HOTPLUG_EN);
4143 
4144 		/* Enable in IER... */
4145 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4146 		/* and unmask in IMR */
4147 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4148 	}
4149 
4150 	I915_WRITE(IMR, dev_priv->irq_mask);
4151 	I915_WRITE(IER, enable_mask);
4152 	POSTING_READ(IER);
4153 
4154 	i915_enable_asle_pipestat(dev);
4155 
4156 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4157 	 * just to make the assert_spin_locked check happy. */
4158 	spin_lock_irq(&dev_priv->irq_lock);
4159 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4160 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4161 	spin_unlock_irq(&dev_priv->irq_lock);
4162 
4163 	return 0;
4164 }
4165 
4166 /*
4167  * Returns true when a page flip has completed.
4168  */
4169 static bool i915_handle_vblank(struct drm_device *dev,
4170 			       int plane, int pipe, u32 iir)
4171 {
4172 	struct drm_i915_private *dev_priv = dev->dev_private;
4173 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4174 
4175 	if (!intel_pipe_handle_vblank(dev, pipe))
4176 		return false;
4177 
4178 	if ((iir & flip_pending) == 0)
4179 		goto check_page_flip;
4180 
4181 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4182 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4183 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4184 	 * the flip is completed (no longer pending). Since this doesn't raise
4185 	 * an interrupt per se, we watch for the change at vblank.
4186 	 */
4187 	if (I915_READ(ISR) & flip_pending)
4188 		goto check_page_flip;
4189 
4190 	intel_prepare_page_flip(dev, plane);
4191 	intel_finish_page_flip(dev, pipe);
4192 	return true;
4193 
4194 check_page_flip:
4195 	intel_check_page_flip(dev, pipe);
4196 	return false;
4197 }
4198 
4199 static irqreturn_t i915_irq_handler(void *arg)
4200 {
4201 	struct drm_device *dev = arg;
4202 	struct drm_i915_private *dev_priv = dev->dev_private;
4203 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4204 	u32 flip_mask =
4205 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4206 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4207 	int pipe;
4208 	irqreturn_t ret = IRQ_NONE;
4209 	if (!intel_irqs_enabled(dev_priv))
4210 		return IRQ_NONE;
4211 
4212 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4213 	disable_rpm_wakeref_asserts(dev_priv);
4214 
4215 	iir = I915_READ(IIR);
4216 	do {
4217 		bool irq_received = (iir & ~flip_mask) != 0;
4218 		bool blc_event = false;
4219 
4220 		/* Can't rely on pipestat interrupt bit in iir as it might
4221 		 * have been cleared after the pipestat interrupt was received.
4222 		 * It doesn't set the bit in iir again, but it still produces
4223 		 * interrupts (for non-MSI).
4224 		 */
4225 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4226 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4227 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4228 
4229 		for_each_pipe(dev_priv, pipe) {
4230 			i915_reg_t reg = PIPESTAT(pipe);
4231 			pipe_stats[pipe] = I915_READ(reg);
4232 
4233 			/* Clear the PIPE*STAT regs before the IIR */
4234 			if (pipe_stats[pipe] & 0x8000ffff) {
4235 				I915_WRITE(reg, pipe_stats[pipe]);
4236 				irq_received = true;
4237 			}
4238 		}
4239 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4240 
4241 		if (!irq_received)
4242 			break;
4243 		ret = IRQ_HANDLED;
4244 		/* Consume port.  Then clear IIR or we'll miss events */
4245 		if (I915_HAS_HOTPLUG(dev) &&
4246 		    iir & I915_DISPLAY_PORT_INTERRUPT) {
4247 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4248 			if (hotplug_status)
4249 				i9xx_hpd_irq_handler(dev, hotplug_status);
4250 		}
4251 
4252 		I915_WRITE(IIR, iir & ~flip_mask);
4253 		new_iir = I915_READ(IIR); /* Flush posted writes */
4254 
4255 		if (iir & I915_USER_INTERRUPT)
4256 			notify_ring(&dev_priv->engine[RCS]);
4257 
4258 		for_each_pipe(dev_priv, pipe) {
4259 			int plane = pipe;
4260 			if (HAS_FBC(dev))
4261 				plane = !plane;
4262 
4263 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4264 			    i915_handle_vblank(dev, plane, pipe, iir))
4265 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4266 
4267 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4268 				blc_event = true;
4269 
4270 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4271 				i9xx_pipe_crc_irq_handler(dev, pipe);
4272 
4273 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4274 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4275 								    pipe);
4276 		}
4277 
4278 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4279 			intel_opregion_asle_intr(dev);
4280 
4281 		/* With MSI, interrupts are only generated when iir
4282 		 * transitions from zero to nonzero.  If another bit got
4283 		 * set while we were handling the existing iir bits, then
4284 		 * we would never get another interrupt.
4285 		 *
4286 		 * This is fine on non-MSI as well, as if we hit this path
4287 		 * we avoid exiting the interrupt handler only to generate
4288 		 * another one.
4289 		 *
4290 		 * Note that for MSI this could cause a stray interrupt report
4291 		 * if an interrupt landed in the time between writing IIR and
4292 		 * the posting read.  This should be rare enough to never
4293 		 * trigger the 99% of 100,000 interrupts test for disabling
4294 		 * stray interrupts.
4295 		 */
4296 		iir = new_iir;
4297 	} while (iir & ~flip_mask);
4298 
4299 	enable_rpm_wakeref_asserts(dev_priv);
4300 	return ret;
4301 }
4302 
4303 static void i915_irq_uninstall(struct drm_device * dev)
4304 {
4305 	struct drm_i915_private *dev_priv = dev->dev_private;
4306 	int pipe;
4307 
4308 	if (I915_HAS_HOTPLUG(dev)) {
4309 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4310 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4311 	}
4312 
4313 	I915_WRITE16(HWSTAM, 0xffff);
4314 	for_each_pipe(dev_priv, pipe) {
4315 		/* Clear enable bits; then clear status bits */
4316 		I915_WRITE(PIPESTAT(pipe), 0);
4317 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4318 	}
4319 	I915_WRITE(IMR, 0xffffffff);
4320 	I915_WRITE(IER, 0x0);
4321 
4322 	I915_WRITE(IIR, I915_READ(IIR));
4323 }
4324 
4325 static void i965_irq_preinstall(struct drm_device * dev)
4326 {
4327 	struct drm_i915_private *dev_priv = dev->dev_private;
4328 	int pipe;
4329 
4330 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4331 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4332 
4333 	I915_WRITE(HWSTAM, 0xeffe);
4334 	for_each_pipe(dev_priv, pipe)
4335 		I915_WRITE(PIPESTAT(pipe), 0);
4336 	I915_WRITE(IMR, 0xffffffff);
4337 	I915_WRITE(IER, 0x0);
4338 	POSTING_READ(IER);
4339 }
4340 
4341 static int i965_irq_postinstall(struct drm_device *dev)
4342 {
4343 	struct drm_i915_private *dev_priv = dev->dev_private;
4344 	u32 enable_mask;
4345 	u32 error_mask;
4346 
4347 	/* Unmask the interrupts that we always want on. */
4348 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4349 			       I915_DISPLAY_PORT_INTERRUPT |
4350 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4351 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4352 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4353 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4354 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4355 
4356 	enable_mask = ~dev_priv->irq_mask;
4357 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4358 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4359 	enable_mask |= I915_USER_INTERRUPT;
4360 
4361 	if (IS_G4X(dev))
4362 		enable_mask |= I915_BSD_USER_INTERRUPT;
4363 
4364 	/* Interrupt setup is already guaranteed to be single-threaded; this is
4365 	 * just to make the assert_spin_locked check happy. */
4366 	spin_lock_irq(&dev_priv->irq_lock);
4367 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4368 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4369 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4370 	spin_unlock_irq(&dev_priv->irq_lock);
4371 
4372 	/*
4373 	 * Enable some error detection, note the instruction error mask
4374 	 * bit is reserved, so we leave it masked.
4375 	 */
4376 	if (IS_G4X(dev)) {
4377 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4378 			       GM45_ERROR_MEM_PRIV |
4379 			       GM45_ERROR_CP_PRIV |
4380 			       I915_ERROR_MEMORY_REFRESH);
4381 	} else {
4382 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4383 			       I915_ERROR_MEMORY_REFRESH);
4384 	}
4385 	I915_WRITE(EMR, error_mask);
4386 
4387 	I915_WRITE(IMR, dev_priv->irq_mask);
4388 	I915_WRITE(IER, enable_mask);
4389 	POSTING_READ(IER);
4390 
4391 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4392 	POSTING_READ(PORT_HOTPLUG_EN);
4393 
4394 	i915_enable_asle_pipestat(dev);
4395 
4396 	return 0;
4397 }
4398 
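/*
 * Enable the PORT_HOTPLUG_EN detect bits for every encoder whose HPD pin is
 * enabled (HDMI and DP share these bits) and program the CRT detection
 * parameters. Caller must hold dev_priv->irq_lock.
 */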
4399 static void i915_hpd_irq_setup(struct drm_device *dev)
4400 {
4401 	struct drm_i915_private *dev_priv = dev->dev_private;
4402 	u32 hotplug_en;
4403 
4404 	assert_spin_locked(&dev_priv->irq_lock);
4405 
4406 	/* Note HDMI and DP share hotplug bits */
4407 	/* enable bits are the same for all generations */
4408 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4409 	/* Programming the CRT detection parameters tends to generate a
4410 	 * spurious hotplug event about three seconds later.  So just do
4411 	 * it once.
4412 	 */
4413 	if (IS_G4X(dev))
4414 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4415 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4416 
4417 	/* Ignore TV since it's buggy */
4418 	i915_hotplug_interrupt_update_locked(dev_priv,
4419 					     HOTPLUG_INT_EN_MASK |
4420 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4421 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4422 					     hotplug_en);
4423 }
4424 
4425 static irqreturn_t i965_irq_handler(void *arg)
4426 {
4427 	struct drm_device *dev = arg;
4428 	struct drm_i915_private *dev_priv = dev->dev_private;
4429 	u32 iir, new_iir;
4430 	u32 pipe_stats[I915_MAX_PIPES];
4431 	int pipe;
4432 	u32 flip_mask =
4433 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4434 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4435 	irqreturn_t ret = IRQ_NONE;
4436 	if (!intel_irqs_enabled(dev_priv))
4437 		return IRQ_NONE;
4438 
4439 	/* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4440 	disable_rpm_wakeref_asserts(dev_priv);
4441 
4442 	iir = I915_READ(IIR);
4443 
4444 	for (;;) {
4445 		bool irq_received = (iir & ~flip_mask) != 0;
4446 		bool blc_event = false;
4447 
4448 		/* Can't rely on pipestat interrupt bit in iir as it might
4449 		 * have been cleared after the pipestat interrupt was received.
4450 		 * It doesn't set the bit in iir again, but it still produces
4451 		 * interrupts (for non-MSI).
4452 		 */
4453 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4454 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4455 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4456 
4457 		for_each_pipe(dev_priv, pipe) {
4458 			i915_reg_t reg = PIPESTAT(pipe);
4459 			pipe_stats[pipe] = I915_READ(reg);
4460 
4461 			/*
4462 			 * Clear the PIPE*STAT regs before the IIR
4463 			 */
4464 			if (pipe_stats[pipe] & 0x8000ffff) {
4465 				I915_WRITE(reg, pipe_stats[pipe]);
4466 				irq_received = true;
4467 			}
4468 		}
4469 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4470 
4471 		if (!irq_received)
4472 			break;
4473 		ret = IRQ_HANDLED;
4474 
4475 		/* Consume port.  Then clear IIR or we'll miss events */
4476 		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4477 			u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4478 			if (hotplug_status)
4479 				i9xx_hpd_irq_handler(dev, hotplug_status);
4480 		}
4481 
4482 		I915_WRITE(IIR, iir & ~flip_mask);
4483 		new_iir = I915_READ(IIR); /* Flush posted writes */
4484 
4485 		if (iir & I915_USER_INTERRUPT)
4486 			notify_ring(&dev_priv->engine[RCS]);
4487 		if (iir & I915_BSD_USER_INTERRUPT)
4488 			notify_ring(&dev_priv->engine[VCS]);
4489 
4490 		for_each_pipe(dev_priv, pipe) {
4491 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4492 			    i915_handle_vblank(dev, pipe, pipe, iir))
4493 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4494 
4495 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4496 				blc_event = true;
4497 
4498 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4499 				i9xx_pipe_crc_irq_handler(dev, pipe);
4500 
4501 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4502 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4503 		}
4504 
4505 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4506 			intel_opregion_asle_intr(dev);
4507 
4508 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4509 			gmbus_irq_handler(dev);
4510 
4511 		/* With MSI, interrupts are only generated when iir
4512 		 * transitions from zero to nonzero.  If another bit got
4513 		 * set while we were handling the existing iir bits, then
4514 		 * we would never get another interrupt.
4515 		 *
4516 		 * This is fine on non-MSI as well, as if we hit this path
4517 		 * we avoid exiting the interrupt handler only to generate
4518 		 * another one.
4519 		 *
4520 		 * Note that for MSI this could cause a stray interrupt report
4521 		 * if an interrupt landed in the time between writing IIR and
4522 		 * the posting read.  This should be rare enough to never
4523 		 * trigger the 99% of 100,000 interrupts test for disabling
4524 		 * stray interrupts.
4525 		 */
4526 		iir = new_iir;
4527 	}
4528 
4529 	enable_rpm_wakeref_asserts(dev_priv);
4530 	return ret;
4531 }
4532 
4533 static void i965_irq_uninstall(struct drm_device * dev)
4534 {
4535 	struct drm_i915_private *dev_priv = dev->dev_private;
4536 	int pipe;
4537 
4538 	if (!dev_priv)
4539 		return;
4540 
4541 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4542 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4543 
4544 	I915_WRITE(HWSTAM, 0xffffffff);
4545 	for_each_pipe(dev_priv, pipe)
4546 		I915_WRITE(PIPESTAT(pipe), 0);
4547 	I915_WRITE(IMR, 0xffffffff);
4548 	I915_WRITE(IER, 0x0);
4549 
4550 	for_each_pipe(dev_priv, pipe)
4551 		I915_WRITE(PIPESTAT(pipe),
4552 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4553 	I915_WRITE(IIR, I915_READ(IIR));
4554 }
4555 
4556 /**
4557  * intel_irq_init - initializes irq support
4558  * @dev_priv: i915 device instance
4559  *
4560  * This function initializes all the irq support, including work items, timers
4561  * and all the vtables. It does not set up the interrupt itself, though.
4562  */
4563 void intel_irq_init(struct drm_i915_private *dev_priv)
4564 {
4565 	struct drm_device *dev = dev_priv->dev;
4566 
4567 	intel_hpd_init_work(dev_priv);
4568 
4569 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4570 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4571 
4572 	/* Let's track the enabled rps events */
4573 	if (IS_VALLEYVIEW(dev_priv))
4574 		/* WaGsvRC0ResidencyMethod:vlv */
4575 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4576 	else
4577 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4578 
4579 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4580 			  i915_hangcheck_elapsed);
4581 
4582 	if (IS_GEN2(dev_priv)) {
4583 		dev->max_vblank_count = 0;
4584 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4585 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4586 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4587 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4588 	} else {
4589 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4590 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4591 	}
4592 
4593 	/*
4594 	 * Opt out of the vblank disable timer on everything except gen2.
4595 	 * Gen2 doesn't have a hardware frame counter and so depends on
4596 	 * vblank interrupts to produce sane vblank sequence numbers.
4597 	 */
4598 	if (!IS_GEN2(dev_priv))
4599 		dev->vblank_disable_immediate = true;
4600 
4601 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4602 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4603 
4604 	if (IS_CHERRYVIEW(dev_priv)) {
4605 		dev->driver->irq_handler = cherryview_irq_handler;
4606 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4607 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4608 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4609 		dev->driver->enable_vblank = valleyview_enable_vblank;
4610 		dev->driver->disable_vblank = valleyview_disable_vblank;
4611 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4612 	} else if (IS_VALLEYVIEW(dev_priv)) {
4613 		dev->driver->irq_handler = valleyview_irq_handler;
4614 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4615 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4616 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4617 		dev->driver->enable_vblank = valleyview_enable_vblank;
4618 		dev->driver->disable_vblank = valleyview_disable_vblank;
4619 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4620 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4621 		dev->driver->irq_handler = gen8_irq_handler;
4622 		dev->driver->irq_preinstall = gen8_irq_reset;
4623 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4624 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4625 		dev->driver->enable_vblank = gen8_enable_vblank;
4626 		dev->driver->disable_vblank = gen8_disable_vblank;
4627 		if (IS_BROXTON(dev))
4628 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4629 		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
4630 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4631 		else
4632 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4633 	} else if (HAS_PCH_SPLIT(dev)) {
4634 		dev->driver->irq_handler = ironlake_irq_handler;
4635 		dev->driver->irq_preinstall = ironlake_irq_reset;
4636 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4637 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4638 		dev->driver->enable_vblank = ironlake_enable_vblank;
4639 		dev->driver->disable_vblank = ironlake_disable_vblank;
4640 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4641 	} else {
4642 		if (INTEL_INFO(dev_priv)->gen == 2) {
4643 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4644 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4645 			dev->driver->irq_handler = i8xx_irq_handler;
4646 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4647 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4648 			dev->driver->irq_preinstall = i915_irq_preinstall;
4649 			dev->driver->irq_postinstall = i915_irq_postinstall;
4650 			dev->driver->irq_uninstall = i915_irq_uninstall;
4651 			dev->driver->irq_handler = i915_irq_handler;
4652 		} else {
4653 			dev->driver->irq_preinstall = i965_irq_preinstall;
4654 			dev->driver->irq_postinstall = i965_irq_postinstall;
4655 			dev->driver->irq_uninstall = i965_irq_uninstall;
4656 			dev->driver->irq_handler = i965_irq_handler;
4657 		}
4658 		if (I915_HAS_HOTPLUG(dev_priv))
4659 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4660 		dev->driver->enable_vblank = i915_enable_vblank;
4661 		dev->driver->disable_vblank = i915_disable_vblank;
4662 	}
4663 }
4664 
4665 /**
4666  * intel_irq_install - enables the hardware interrupt
4667  * @dev_priv: i915 device instance
4668  *
4669  * This function enables the hardware interrupt handling, but leaves the hotplug
4670  * handling still disabled. It is called after intel_irq_init().
4671  *
4672  * In the driver load and resume code we need working interrupts in a few places
4673  * but don't want to deal with the hassle of concurrent probe and hotplug
4674  * workers. Hence the split into this two-stage approach.
4675  */
4676 int intel_irq_install(struct drm_i915_private *dev_priv)
4677 {
4678 	/*
4679 	 * We enable some interrupt sources in our postinstall hooks, so mark
4680 	 * interrupts as enabled _before_ actually enabling them to avoid
4681 	 * special cases in our ordering checks.
4682 	 */
4683 	dev_priv->pm.irqs_enabled = true;
4684 
4685 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4686 }
4687 
4688 /**
4689  * intel_irq_uninstall - finalizes all irq handling
4690  * @dev_priv: i915 device instance
4691  *
4692  * This stops interrupt and hotplug handling and unregisters and frees all
4693  * resources acquired in the init functions.
4694  */
4695 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4696 {
4697 	drm_irq_uninstall(dev_priv->dev);
4698 	intel_hpd_cancel_work(dev_priv);
4699 	dev_priv->pm.irqs_enabled = false;
4700 }
4701 
4702 /**
4703  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4704  * @dev_priv: i915 device instance
4705  *
4706  * This function is used to disable interrupts at runtime, both in the runtime
4707  * pm and the system suspend/resume code.
4708  */
4709 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4710 {
4711 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4712 	dev_priv->pm.irqs_enabled = false;
4713 	synchronize_irq(dev_priv->dev->irq);
4714 }
4715 
4716 /**
4717  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4718  * @dev_priv: i915 device instance
4719  *
4720  * This function is used to enable interrupts at runtime, both in the runtime
4721  * pm and the system suspend/resume code.
4722  */
4723 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4724 {
4725 	dev_priv->pm.irqs_enabled = true;
4726 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4727 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4728 }
4729