xref: /dragonfly/sys/dev/drm/i915/i915_irq.c (revision 38b5d46c)
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: interrupt handling
37  *
38  * These functions provide the basic support for enabling and disabling the
39  * interrupt handling support. There's a lot more functionality in i915_irq.c
40  * and related files, but that will be described in separate chapters.
41  */
42 
43 static const u32 hpd_ilk[HPD_NUM_PINS] = {
44 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
45 };
46 
47 static const u32 hpd_ivb[HPD_NUM_PINS] = {
48 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
49 };
50 
51 static const u32 hpd_bdw[HPD_NUM_PINS] = {
52 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
53 };
54 
55 static const u32 hpd_ibx[HPD_NUM_PINS] = {
56 	[HPD_CRT] = SDE_CRT_HOTPLUG,
57 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
58 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
59 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
60 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
61 };
62 
63 static const u32 hpd_cpt[HPD_NUM_PINS] = {
64 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
65 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
66 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
67 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
68 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
69 };
70 
71 static const u32 hpd_spt[HPD_NUM_PINS] = {
72 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
73 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
74 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
75 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
76 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
77 };
78 
79 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
80 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
81 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
82 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
83 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
84 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
85 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
86 };
87 
88 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
89 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
90 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
91 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
92 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
93 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
94 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
95 };
96 
97 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
98 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
99 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
100 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
101 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
102 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
103 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
104 };
105 
106 /* BXT hpd list */
107 static const u32 hpd_bxt[HPD_NUM_PINS] = {
108 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
109 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
110 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
111 };
112 
113 /* IIR can theoretically queue up two events. Be paranoid. */
114 #define GEN8_IRQ_RESET_NDX(type, which) do { \
115 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
116 	POSTING_READ(GEN8_##type##_IMR(which)); \
117 	I915_WRITE(GEN8_##type##_IER(which), 0); \
118 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
119 	POSTING_READ(GEN8_##type##_IIR(which)); \
120 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
121 	POSTING_READ(GEN8_##type##_IIR(which)); \
122 } while (0)
123 
124 #define GEN5_IRQ_RESET(type) do { \
125 	I915_WRITE(type##IMR, 0xffffffff); \
126 	POSTING_READ(type##IMR); \
127 	I915_WRITE(type##IER, 0); \
128 	I915_WRITE(type##IIR, 0xffffffff); \
129 	POSTING_READ(type##IIR); \
130 	I915_WRITE(type##IIR, 0xffffffff); \
131 	POSTING_READ(type##IIR); \
132 } while (0)
133 
134 /*
135  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
136  */
137 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
138 				    i915_reg_t reg)
139 {
140 	u32 val = I915_READ(reg);
141 
142 	if (val == 0)
143 		return;
144 
145 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
146 	     i915_mmio_reg_offset(reg), val);
147 	I915_WRITE(reg, 0xffffffff);
148 	POSTING_READ(reg);
149 	I915_WRITE(reg, 0xffffffff);
150 	POSTING_READ(reg);
151 }
152 
153 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
154 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
155 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
156 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
157 	POSTING_READ(GEN8_##type##_IMR(which)); \
158 } while (0)
159 
160 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
161 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
162 	I915_WRITE(type##IER, (ier_val)); \
163 	I915_WRITE(type##IMR, (imr_val)); \
164 	POSTING_READ(type##IMR); \
165 } while (0)
166 
167 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
168 
169 /* For display hotplug interrupt */
170 static inline void
171 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
172 				     uint32_t mask,
173 				     uint32_t bits)
174 {
175 	uint32_t val;
176 
177 	assert_spin_locked(&dev_priv->irq_lock);
178 	WARN_ON(bits & ~mask);
179 
180 	val = I915_READ(PORT_HOTPLUG_EN);
181 	val &= ~mask;
182 	val |= bits;
183 	I915_WRITE(PORT_HOTPLUG_EN, val);
184 }
185 
186 /**
187  * i915_hotplug_interrupt_update - update hotplug interrupt enable
188  * @dev_priv: driver private
189  * @mask: bits to update
190  * @bits: bits to enable
191  * NOTE: the HPD enable bits are modified both inside and outside
192  * of an interrupt context. To avoid that read-modify-write cycles
193  * interfer, these bits are protected by a spinlock. Since this
194  * function is usually not called from a context where the lock is
195  * held already, this function acquires the lock itself. A non-locking
196  * version is also available.
197  */
198 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
199 				   uint32_t mask,
200 				   uint32_t bits)
201 {
202 	spin_lock_irq(&dev_priv->irq_lock);
203 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
204 	spin_unlock_irq(&dev_priv->irq_lock);
205 }
206 
207 /**
208  * ilk_update_display_irq - update DEIMR
209  * @dev_priv: driver private
210  * @interrupt_mask: mask of interrupt bits to update
211  * @enabled_irq_mask: mask of interrupt bits to enable
212  */
213 void ilk_update_display_irq(struct drm_i915_private *dev_priv,
214 			    uint32_t interrupt_mask,
215 			    uint32_t enabled_irq_mask)
216 {
217 	uint32_t new_val;
218 
219 	assert_spin_locked(&dev_priv->irq_lock);
220 
221 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
222 
223 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
224 		return;
225 
226 	new_val = dev_priv->irq_mask;
227 	new_val &= ~interrupt_mask;
228 	new_val |= (~enabled_irq_mask & interrupt_mask);
229 
230 	if (new_val != dev_priv->irq_mask) {
231 		dev_priv->irq_mask = new_val;
232 		I915_WRITE(DEIMR, dev_priv->irq_mask);
233 		POSTING_READ(DEIMR);
234 	}
235 }
236 
237 /**
238  * ilk_update_gt_irq - update GTIMR
239  * @dev_priv: driver private
240  * @interrupt_mask: mask of interrupt bits to update
241  * @enabled_irq_mask: mask of interrupt bits to enable
242  */
243 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
244 			      uint32_t interrupt_mask,
245 			      uint32_t enabled_irq_mask)
246 {
247 	assert_spin_locked(&dev_priv->irq_lock);
248 
249 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
250 
251 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
252 		return;
253 
254 	dev_priv->gt_irq_mask &= ~interrupt_mask;
255 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
256 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
257 	POSTING_READ(GTIMR);
258 }
259 
260 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
261 {
262 	ilk_update_gt_irq(dev_priv, mask, mask);
263 }
264 
265 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266 {
267 	ilk_update_gt_irq(dev_priv, mask, 0);
268 }
269 
270 static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
271 {
272 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
273 }
274 
275 static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
276 {
277 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
278 }
279 
280 static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
281 {
282 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
283 }
284 
285 /**
286  * snb_update_pm_irq - update GEN6_PMIMR
287  * @dev_priv: driver private
288  * @interrupt_mask: mask of interrupt bits to update
289  * @enabled_irq_mask: mask of interrupt bits to enable
290  */
291 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
292 			      uint32_t interrupt_mask,
293 			      uint32_t enabled_irq_mask)
294 {
295 	uint32_t new_val;
296 
297 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
298 
299 	assert_spin_locked(&dev_priv->irq_lock);
300 
301 	new_val = dev_priv->pm_irq_mask;
302 	new_val &= ~interrupt_mask;
303 	new_val |= (~enabled_irq_mask & interrupt_mask);
304 
305 	if (new_val != dev_priv->pm_irq_mask) {
306 		dev_priv->pm_irq_mask = new_val;
307 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
308 		POSTING_READ(gen6_pm_imr(dev_priv));
309 	}
310 }
311 
312 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
313 {
314 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
315 		return;
316 
317 	snb_update_pm_irq(dev_priv, mask, mask);
318 }
319 
320 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
321 				  uint32_t mask)
322 {
323 	snb_update_pm_irq(dev_priv, mask, 0);
324 }
325 
326 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
327 {
328 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
329 		return;
330 
331 	__gen6_disable_pm_irq(dev_priv, mask);
332 }
333 
334 void gen6_reset_rps_interrupts(struct drm_device *dev)
335 {
336 	struct drm_i915_private *dev_priv = dev->dev_private;
337 	i915_reg_t reg = gen6_pm_iir(dev_priv);
338 
339 	spin_lock_irq(&dev_priv->irq_lock);
340 	I915_WRITE(reg, dev_priv->pm_rps_events);
341 	I915_WRITE(reg, dev_priv->pm_rps_events);
342 	POSTING_READ(reg);
343 	dev_priv->rps.pm_iir = 0;
344 	spin_unlock_irq(&dev_priv->irq_lock);
345 }
346 
347 void gen6_enable_rps_interrupts(struct drm_device *dev)
348 {
349 	struct drm_i915_private *dev_priv = dev->dev_private;
350 
351 	spin_lock_irq(&dev_priv->irq_lock);
352 
353 	WARN_ON(dev_priv->rps.pm_iir);
354 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
355 	dev_priv->rps.interrupts_enabled = true;
356 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
357 				dev_priv->pm_rps_events);
358 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
359 
360 	spin_unlock_irq(&dev_priv->irq_lock);
361 }
362 
363 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
364 {
365 	/*
366 	 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
367 	 * if GEN6_PM_UP_EI_EXPIRED is masked.
368 	 *
369 	 * TODO: verify if this can be reproduced on VLV,CHV.
370 	 */
371 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
372 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
373 
374 	if (INTEL_INFO(dev_priv)->gen >= 8)
375 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
376 
377 	return mask;
378 }
379 
380 void gen6_disable_rps_interrupts(struct drm_device *dev)
381 {
382 	struct drm_i915_private *dev_priv = dev->dev_private;
383 
384 	spin_lock_irq(&dev_priv->irq_lock);
385 	dev_priv->rps.interrupts_enabled = false;
386 	spin_unlock_irq(&dev_priv->irq_lock);
387 
388 	cancel_work_sync(&dev_priv->rps.work);
389 
390 	spin_lock_irq(&dev_priv->irq_lock);
391 
392 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
393 
394 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
395 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
396 				~dev_priv->pm_rps_events);
397 
398 	spin_unlock_irq(&dev_priv->irq_lock);
399 
400 #if 0
401 	synchronize_irq(dev->irq);
402 #endif
403 }
404 
405 /**
406  * bdw_update_port_irq - update DE port interrupt
407  * @dev_priv: driver private
408  * @interrupt_mask: mask of interrupt bits to update
409  * @enabled_irq_mask: mask of interrupt bits to enable
410  */
411 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
412 				uint32_t interrupt_mask,
413 				uint32_t enabled_irq_mask)
414 {
415 	uint32_t new_val;
416 	uint32_t old_val;
417 
418 	assert_spin_locked(&dev_priv->irq_lock);
419 
420 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
421 
422 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
423 		return;
424 
425 	old_val = I915_READ(GEN8_DE_PORT_IMR);
426 
427 	new_val = old_val;
428 	new_val &= ~interrupt_mask;
429 	new_val |= (~enabled_irq_mask & interrupt_mask);
430 
431 	if (new_val != old_val) {
432 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
433 		POSTING_READ(GEN8_DE_PORT_IMR);
434 	}
435 }
436 
437 /**
438  * bdw_update_pipe_irq - update DE pipe interrupt
439  * @dev_priv: driver private
440  * @pipe: pipe whose interrupt to update
441  * @interrupt_mask: mask of interrupt bits to update
442  * @enabled_irq_mask: mask of interrupt bits to enable
443  */
444 void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
445 			 enum i915_pipe pipe,
446 			 uint32_t interrupt_mask,
447 			 uint32_t enabled_irq_mask)
448 {
449 	uint32_t new_val;
450 
451 	assert_spin_locked(&dev_priv->irq_lock);
452 
453 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
454 
455 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
456 		return;
457 
458 	new_val = dev_priv->de_irq_mask[pipe];
459 	new_val &= ~interrupt_mask;
460 	new_val |= (~enabled_irq_mask & interrupt_mask);
461 
462 	if (new_val != dev_priv->de_irq_mask[pipe]) {
463 		dev_priv->de_irq_mask[pipe] = new_val;
464 		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
465 		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
466 	}
467 }
468 
469 /**
470  * ibx_display_interrupt_update - update SDEIMR
471  * @dev_priv: driver private
472  * @interrupt_mask: mask of interrupt bits to update
473  * @enabled_irq_mask: mask of interrupt bits to enable
474  */
475 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
476 				  uint32_t interrupt_mask,
477 				  uint32_t enabled_irq_mask)
478 {
479 	uint32_t sdeimr = I915_READ(SDEIMR);
480 	sdeimr &= ~interrupt_mask;
481 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
482 
483 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
484 
485 	assert_spin_locked(&dev_priv->irq_lock);
486 
487 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
488 		return;
489 
490 	I915_WRITE(SDEIMR, sdeimr);
491 	POSTING_READ(SDEIMR);
492 }
493 
494 static void
495 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
496 		       u32 enable_mask, u32 status_mask)
497 {
498 	i915_reg_t reg = PIPESTAT(pipe);
499 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
500 
501 	assert_spin_locked(&dev_priv->irq_lock);
502 	WARN_ON(!intel_irqs_enabled(dev_priv));
503 
504 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
505 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
506 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
507 		      pipe_name(pipe), enable_mask, status_mask))
508 		return;
509 
510 	if ((pipestat & enable_mask) == enable_mask)
511 		return;
512 
513 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
514 
515 	/* Enable the interrupt, clear any pending status */
516 	pipestat |= enable_mask | status_mask;
517 	I915_WRITE(reg, pipestat);
518 	POSTING_READ(reg);
519 }
520 
521 static void
522 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
523 		        u32 enable_mask, u32 status_mask)
524 {
525 	i915_reg_t reg = PIPESTAT(pipe);
526 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
527 
528 	assert_spin_locked(&dev_priv->irq_lock);
529 	WARN_ON(!intel_irqs_enabled(dev_priv));
530 
531 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
532 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
533 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
534 		      pipe_name(pipe), enable_mask, status_mask))
535 		return;
536 
537 	if ((pipestat & enable_mask) == 0)
538 		return;
539 
540 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
541 
542 	pipestat &= ~enable_mask;
543 	I915_WRITE(reg, pipestat);
544 	POSTING_READ(reg);
545 }
546 
547 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
548 {
549 	u32 enable_mask = status_mask << 16;
550 
551 	/*
552 	 * On pipe A we don't support the PSR interrupt yet,
553 	 * on pipe B and C the same bit MBZ.
554 	 */
555 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
556 		return 0;
557 	/*
558 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
559 	 * A the same bit is for perf counters which we don't use either.
560 	 */
561 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
562 		return 0;
563 
564 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
565 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
566 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
567 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
568 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
569 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
570 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
571 
572 	return enable_mask;
573 }
574 
575 void
576 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
577 		     u32 status_mask)
578 {
579 	u32 enable_mask;
580 
581 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
582 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
583 							   status_mask);
584 	else
585 		enable_mask = status_mask << 16;
586 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
587 }
588 
589 void
590 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
591 		      u32 status_mask)
592 {
593 	u32 enable_mask;
594 
595 	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
596 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
597 							   status_mask);
598 	else
599 		enable_mask = status_mask << 16;
600 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
601 }
602 
603 /**
604  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
605  * @dev: drm device
606  */
607 static void i915_enable_asle_pipestat(struct drm_device *dev)
608 {
609 	struct drm_i915_private *dev_priv = dev->dev_private;
610 
611 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
612 		return;
613 
614 	spin_lock_irq(&dev_priv->irq_lock);
615 
616 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
617 	if (INTEL_INFO(dev)->gen >= 4)
618 		i915_enable_pipestat(dev_priv, PIPE_A,
619 				     PIPE_LEGACY_BLC_EVENT_STATUS);
620 
621 	spin_unlock_irq(&dev_priv->irq_lock);
622 }
623 
624 /*
625  * This timing diagram depicts the video signal in and
626  * around the vertical blanking period.
627  *
628  * Assumptions about the fictitious mode used in this example:
629  *  vblank_start >= 3
630  *  vsync_start = vblank_start + 1
631  *  vsync_end = vblank_start + 2
632  *  vtotal = vblank_start + 3
633  *
634  *           start of vblank:
635  *           latch double buffered registers
636  *           increment frame counter (ctg+)
637  *           generate start of vblank interrupt (gen4+)
638  *           |
639  *           |          frame start:
640  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
641  *           |          may be shifted forward 1-3 extra lines via PIPECONF
642  *           |          |
643  *           |          |  start of vsync:
644  *           |          |  generate vsync interrupt
645  *           |          |  |
646  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
647  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
648  * ----va---> <-----------------vb--------------------> <--------va-------------
649  *       |          |       <----vs----->                     |
650  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
651  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
652  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
653  *       |          |                                         |
654  *       last visible pixel                                   first visible pixel
655  *                  |                                         increment frame counter (gen3/4)
656  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
657  *
658  * x  = horizontal active
659  * _  = horizontal blanking
660  * hs = horizontal sync
661  * va = vertical active
662  * vb = vertical blanking
663  * vs = vertical sync
664  * vbs = vblank_start (number)
665  *
666  * Summary:
667  * - most events happen at the start of horizontal sync
668  * - frame start happens at the start of horizontal blank, 1-4 lines
669  *   (depending on PIPECONF settings) after the start of vblank
670  * - gen3/4 pixel and frame counter are synchronized with the start
671  *   of horizontal active on the first line of vertical active
672  */
673 
674 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
675 {
676 	/* Gen2 doesn't have a hardware frame counter */
677 	return 0;
678 }
679 
680 /* Called from drm generic code, passed a 'crtc', which
681  * we use as a pipe index
682  */
683 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
684 {
685 	struct drm_i915_private *dev_priv = dev->dev_private;
686 	i915_reg_t high_frame, low_frame;
687 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
688 	struct intel_crtc *intel_crtc =
689 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
690 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
691 
692 	htotal = mode->crtc_htotal;
693 	hsync_start = mode->crtc_hsync_start;
694 	vbl_start = mode->crtc_vblank_start;
695 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
696 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
697 
698 	/* Convert to pixel count */
699 	vbl_start *= htotal;
700 
701 	/* Start of vblank event occurs at start of hsync */
702 	vbl_start -= htotal - hsync_start;
703 
704 	high_frame = PIPEFRAME(pipe);
705 	low_frame = PIPEFRAMEPIXEL(pipe);
706 
707 	/*
708 	 * High & low register fields aren't synchronized, so make sure
709 	 * we get a low value that's stable across two reads of the high
710 	 * register.
711 	 */
712 	do {
713 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
714 		low   = I915_READ(low_frame);
715 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
716 	} while (high1 != high2);
717 
718 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
719 	pixel = low & PIPE_PIXEL_MASK;
720 	low >>= PIPE_FRAME_LOW_SHIFT;
721 
722 	/*
723 	 * The frame counter increments at beginning of active.
724 	 * Cook up a vblank counter by also checking the pixel
725 	 * counter against vblank start.
726 	 */
727 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
728 }
729 
730 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
731 {
732 	struct drm_i915_private *dev_priv = dev->dev_private;
733 
734 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
735 }
736 
737 /* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
738 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
739 {
740 	struct drm_device *dev = crtc->base.dev;
741 	struct drm_i915_private *dev_priv = dev->dev_private;
742 	const struct drm_display_mode *mode = &crtc->base.hwmode;
743 	enum i915_pipe pipe = crtc->pipe;
744 	int position, vtotal;
745 
746 	vtotal = mode->crtc_vtotal;
747 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
748 		vtotal /= 2;
749 
750 	if (IS_GEN2(dev))
751 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
752 	else
753 		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
754 
755 	/*
756 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
757 	 * read it just before the start of vblank.  So try it again
758 	 * so we don't accidentally end up spanning a vblank frame
759 	 * increment, causing the pipe_update_end() code to squak at us.
760 	 *
761 	 * The nature of this problem means we can't simply check the ISR
762 	 * bit and return the vblank start value; nor can we use the scanline
763 	 * debug register in the transcoder as it appears to have the same
764 	 * problem.  We may need to extend this to include other platforms,
765 	 * but so far testing only shows the problem on HSW.
766 	 */
767 	if (HAS_DDI(dev) && !position) {
768 		int i, temp;
769 
770 		for (i = 0; i < 100; i++) {
771 			udelay(1);
772 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
773 				DSL_LINEMASK_GEN3;
774 			if (temp != position) {
775 				position = temp;
776 				break;
777 			}
778 		}
779 	}
780 
781 	/*
782 	 * See update_scanline_offset() for the details on the
783 	 * scanline_offset adjustment.
784 	 */
785 	return (position + crtc->scanline_offset) % vtotal;
786 }
787 
788 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
789 				    unsigned int flags, int *vpos, int *hpos,
790 				    ktime_t *stime, ktime_t *etime,
791 				    const struct drm_display_mode *mode)
792 {
793 	struct drm_i915_private *dev_priv = dev->dev_private;
794 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
795 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
796 	int position;
797 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
798 	bool in_vbl = true;
799 	int ret = 0;
800 	unsigned long irqflags;
801 
802 	if (WARN_ON(!mode->crtc_clock)) {
803 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
804 				 "pipe %c\n", pipe_name(pipe));
805 		return 0;
806 	}
807 
808 	htotal = mode->crtc_htotal;
809 	hsync_start = mode->crtc_hsync_start;
810 	vtotal = mode->crtc_vtotal;
811 	vbl_start = mode->crtc_vblank_start;
812 	vbl_end = mode->crtc_vblank_end;
813 
814 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
815 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
816 		vbl_end /= 2;
817 		vtotal /= 2;
818 	}
819 
820 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
821 
822 	/*
823 	 * Lock uncore.lock, as we will do multiple timing critical raw
824 	 * register reads, potentially with preemption disabled, so the
825 	 * following code must not block on uncore.lock.
826 	 */
827 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
828 
829 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
830 
831 	/* Get optional system timestamp before query. */
832 	if (stime)
833 		*stime = ktime_get();
834 
835 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
836 		/* No obvious pixelcount register. Only query vertical
837 		 * scanout position from Display scan line register.
838 		 */
839 		position = __intel_get_crtc_scanline(intel_crtc);
840 	} else {
841 		/* Have access to pixelcount since start of frame.
842 		 * We can split this into vertical and horizontal
843 		 * scanout position.
844 		 */
845 		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
846 
847 		/* convert to pixel counts */
848 		vbl_start *= htotal;
849 		vbl_end *= htotal;
850 		vtotal *= htotal;
851 
852 		/*
853 		 * In interlaced modes, the pixel counter counts all pixels,
854 		 * so one field will have htotal more pixels. In order to avoid
855 		 * the reported position from jumping backwards when the pixel
856 		 * counter is beyond the length of the shorter field, just
857 		 * clamp the position the length of the shorter field. This
858 		 * matches how the scanline counter based position works since
859 		 * the scanline counter doesn't count the two half lines.
860 		 */
861 		if (position >= vtotal)
862 			position = vtotal - 1;
863 
864 		/*
865 		 * Start of vblank interrupt is triggered at start of hsync,
866 		 * just prior to the first active line of vblank. However we
867 		 * consider lines to start at the leading edge of horizontal
868 		 * active. So, should we get here before we've crossed into
869 		 * the horizontal active of the first line in vblank, we would
870 		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
871 		 * always add htotal-hsync_start to the current pixel position.
872 		 */
873 		position = (position + htotal - hsync_start) % vtotal;
874 	}
875 
876 	/* Get optional system timestamp after query. */
877 	if (etime)
878 		*etime = ktime_get();
879 
880 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
881 
882 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
883 
884 	in_vbl = position >= vbl_start && position < vbl_end;
885 
886 	/*
887 	 * While in vblank, position will be negative
888 	 * counting up towards 0 at vbl_end. And outside
889 	 * vblank, position will be positive counting
890 	 * up since vbl_end.
891 	 */
892 	if (position >= vbl_start)
893 		position -= vbl_end;
894 	else
895 		position += vtotal - vbl_end;
896 
897 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
898 		*vpos = position;
899 		*hpos = 0;
900 	} else {
901 		*vpos = position / htotal;
902 		*hpos = position - (*vpos * htotal);
903 	}
904 
905 	/* In vblank? */
906 	if (in_vbl)
907 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
908 
909 	return ret;
910 }
911 
912 int intel_get_crtc_scanline(struct intel_crtc *crtc)
913 {
914 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
915 	unsigned long irqflags;
916 	int position;
917 
918 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
919 	position = __intel_get_crtc_scanline(crtc);
920 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
921 
922 	return position;
923 }
924 
925 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
926 			      int *max_error,
927 			      struct timeval *vblank_time,
928 			      unsigned flags)
929 {
930 	struct drm_crtc *crtc;
931 
932 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
933 		DRM_ERROR("Invalid crtc %u\n", pipe);
934 		return -EINVAL;
935 	}
936 
937 	/* Get drm_crtc to timestamp: */
938 	crtc = intel_get_crtc_for_pipe(dev, pipe);
939 	if (crtc == NULL) {
940 		DRM_ERROR("Invalid crtc %u\n", pipe);
941 		return -EINVAL;
942 	}
943 
944 	if (!crtc->hwmode.crtc_clock) {
945 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
946 		return -EBUSY;
947 	}
948 
949 	/* Helper routine in DRM core does all the work: */
950 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
951 						     vblank_time, flags,
952 						     &crtc->hwmode);
953 }
954 
955 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
956 {
957 	struct drm_i915_private *dev_priv = dev->dev_private;
958 	u32 busy_up, busy_down, max_avg, min_avg;
959 	u8 new_delay;
960 
961 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
962 
963 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
964 
965 	new_delay = dev_priv->ips.cur_delay;
966 
967 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
968 	busy_up = I915_READ(RCPREVBSYTUPAVG);
969 	busy_down = I915_READ(RCPREVBSYTDNAVG);
970 	max_avg = I915_READ(RCBMAXAVG);
971 	min_avg = I915_READ(RCBMINAVG);
972 
973 	/* Handle RCS change request from hw */
974 	if (busy_up > max_avg) {
975 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
976 			new_delay = dev_priv->ips.cur_delay - 1;
977 		if (new_delay < dev_priv->ips.max_delay)
978 			new_delay = dev_priv->ips.max_delay;
979 	} else if (busy_down < min_avg) {
980 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
981 			new_delay = dev_priv->ips.cur_delay + 1;
982 		if (new_delay > dev_priv->ips.min_delay)
983 			new_delay = dev_priv->ips.min_delay;
984 	}
985 
986 	if (ironlake_set_drps(dev, new_delay))
987 		dev_priv->ips.cur_delay = new_delay;
988 
989 	lockmgr(&mchdev_lock, LK_RELEASE);
990 
991 	return;
992 }
993 
994 static void notify_ring(struct intel_engine_cs *ring)
995 {
996 	if (!intel_ring_initialized(ring))
997 		return;
998 
999 	trace_i915_gem_request_notify(ring);
1000 
1001 	wake_up_all(&ring->irq_queue);
1002 }
1003 
1004 static void vlv_c0_read(struct drm_i915_private *dev_priv,
1005 			struct intel_rps_ei *ei)
1006 {
1007 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1008 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
1009 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
1010 }
1011 
1012 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
1013 			 const struct intel_rps_ei *old,
1014 			 const struct intel_rps_ei *now,
1015 			 int threshold)
1016 {
1017 	u64 time, c0;
1018 	unsigned int mul = 100;
1019 
1020 	if (old->cz_clock == 0)
1021 		return false;
1022 
1023 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1024 		mul <<= 8;
1025 
1026 	time = now->cz_clock - old->cz_clock;
1027 	time *= threshold * dev_priv->czclk_freq;
1028 
1029 	/* Workload can be split between render + media, e.g. SwapBuffers
1030 	 * being blitted in X after being rendered in mesa. To account for
1031 	 * this we need to combine both engines into our activity counter.
1032 	 */
1033 	c0 = now->render_c0 - old->render_c0;
1034 	c0 += now->media_c0 - old->media_c0;
1035 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1036 
1037 	return c0 >= time;
1038 }
1039 
1040 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1041 {
1042 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1043 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1044 }
1045 
1046 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1047 {
1048 	struct intel_rps_ei now;
1049 	u32 events = 0;
1050 
1051 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1052 		return 0;
1053 
1054 	vlv_c0_read(dev_priv, &now);
1055 	if (now.cz_clock == 0)
1056 		return 0;
1057 
1058 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1059 		if (!vlv_c0_above(dev_priv,
1060 				  &dev_priv->rps.down_ei, &now,
1061 				  dev_priv->rps.down_threshold))
1062 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1063 		dev_priv->rps.down_ei = now;
1064 	}
1065 
1066 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1067 		if (vlv_c0_above(dev_priv,
1068 				 &dev_priv->rps.up_ei, &now,
1069 				 dev_priv->rps.up_threshold))
1070 			events |= GEN6_PM_RP_UP_THRESHOLD;
1071 		dev_priv->rps.up_ei = now;
1072 	}
1073 
1074 	return events;
1075 }
1076 
1077 static bool any_waiters(struct drm_i915_private *dev_priv)
1078 {
1079 	struct intel_engine_cs *ring;
1080 	int i;
1081 
1082 	for_each_ring(ring, dev_priv, i)
1083 		if (ring->irq_refcount)
1084 			return true;
1085 
1086 	return false;
1087 }
1088 
1089 static void gen6_pm_rps_work(struct work_struct *work)
1090 {
1091 	struct drm_i915_private *dev_priv =
1092 		container_of(work, struct drm_i915_private, rps.work);
1093 	bool client_boost;
1094 	int new_delay, adj, min, max;
1095 	u32 pm_iir;
1096 
1097 	spin_lock_irq(&dev_priv->irq_lock);
1098 	/* Speed up work cancelation during disabling rps interrupts. */
1099 	if (!dev_priv->rps.interrupts_enabled) {
1100 		spin_unlock_irq(&dev_priv->irq_lock);
1101 		return;
1102 	}
1103 
1104 	/*
1105 	 * The RPS work is synced during runtime suspend, we don't require a
1106 	 * wakeref. TODO: instead of disabling the asserts make sure that we
1107 	 * always hold an RPM reference while the work is running.
1108 	 */
1109 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1110 
1111 	pm_iir = dev_priv->rps.pm_iir;
1112 	dev_priv->rps.pm_iir = 0;
1113 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1114 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1115 	client_boost = dev_priv->rps.client_boost;
1116 	dev_priv->rps.client_boost = false;
1117 	spin_unlock_irq(&dev_priv->irq_lock);
1118 
1119 	/* Make sure we didn't queue anything we're not going to process. */
1120 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1121 
1122 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1123 		goto out;
1124 
1125 	mutex_lock(&dev_priv->rps.hw_lock);
1126 
1127 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1128 
1129 	adj = dev_priv->rps.last_adj;
1130 	new_delay = dev_priv->rps.cur_freq;
1131 	min = dev_priv->rps.min_freq_softlimit;
1132 	max = dev_priv->rps.max_freq_softlimit;
1133 
1134 	if (client_boost) {
1135 		new_delay = dev_priv->rps.max_freq_softlimit;
1136 		adj = 0;
1137 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1138 		if (adj > 0)
1139 			adj *= 2;
1140 		else /* CHV needs even encode values */
1141 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1142 		/*
1143 		 * For better performance, jump directly
1144 		 * to RPe if we're below it.
1145 		 */
1146 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1147 			new_delay = dev_priv->rps.efficient_freq;
1148 			adj = 0;
1149 		}
1150 	} else if (any_waiters(dev_priv)) {
1151 		adj = 0;
1152 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1153 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1154 			new_delay = dev_priv->rps.efficient_freq;
1155 		else
1156 			new_delay = dev_priv->rps.min_freq_softlimit;
1157 		adj = 0;
1158 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1159 		if (adj < 0)
1160 			adj *= 2;
1161 		else /* CHV needs even encode values */
1162 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1163 	} else { /* unknown event */
1164 		adj = 0;
1165 	}
1166 
1167 	dev_priv->rps.last_adj = adj;
1168 
1169 	/* sysfs frequency interfaces may have snuck in while servicing the
1170 	 * interrupt
1171 	 */
1172 	new_delay += adj;
1173 	new_delay = clamp_t(int, new_delay, min, max);
1174 
1175 	intel_set_rps(dev_priv->dev, new_delay);
1176 
1177 	mutex_unlock(&dev_priv->rps.hw_lock);
1178 out:
1179 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1180 }
1181 
1182 
1183 /**
1184  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1185  * occurred.
1186  * @work: workqueue struct
1187  *
1188  * Doesn't actually do anything except notify userspace. As a consequence of
1189  * this event, userspace should try to remap the bad rows since statistically
1190  * it is likely the same row is more likely to go bad again.
1191  */
1192 static void ivybridge_parity_work(struct work_struct *work)
1193 {
1194 	struct drm_i915_private *dev_priv =
1195 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1196 	u32 error_status, row, bank, subbank;
1197 	char *parity_event[6];
1198 	uint32_t misccpctl;
1199 	uint8_t slice = 0;
1200 
1201 	/* We must turn off DOP level clock gating to access the L3 registers.
1202 	 * In order to prevent a get/put style interface, acquire struct mutex
1203 	 * any time we access those registers.
1204 	 */
1205 	mutex_lock(&dev_priv->dev->struct_mutex);
1206 
1207 	/* If we've screwed up tracking, just let the interrupt fire again */
1208 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1209 		goto out;
1210 
1211 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1212 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1213 	POSTING_READ(GEN7_MISCCPCTL);
1214 
1215 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1216 		i915_reg_t reg;
1217 
1218 		slice--;
1219 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1220 			break;
1221 
1222 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1223 
1224 		reg = GEN7_L3CDERRST1(slice);
1225 
1226 		error_status = I915_READ(reg);
1227 		row = GEN7_PARITY_ERROR_ROW(error_status);
1228 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1229 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1230 
1231 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1232 		POSTING_READ(reg);
1233 
1234 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1235 		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
1236 		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
1237 		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1238 		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
1239 		parity_event[5] = NULL;
1240 
1241 #if 0
1242 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1243 				   KOBJ_CHANGE, parity_event);
1244 #endif
1245 
1246 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1247 			  slice, row, bank, subbank);
1248 
1249 		kfree(parity_event[4]);
1250 		kfree(parity_event[3]);
1251 		kfree(parity_event[2]);
1252 		kfree(parity_event[1]);
1253 	}
1254 
1255 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1256 
1257 out:
1258 	WARN_ON(dev_priv->l3_parity.which_slice);
1259 	spin_lock_irq(&dev_priv->irq_lock);
1260 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1261 	spin_unlock_irq(&dev_priv->irq_lock);
1262 
1263 	mutex_unlock(&dev_priv->dev->struct_mutex);
1264 }
1265 
1266 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1267 {
1268 	struct drm_i915_private *dev_priv = dev->dev_private;
1269 
1270 	if (!HAS_L3_DPF(dev))
1271 		return;
1272 
1273 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1274 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1275 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1276 
1277 	iir &= GT_PARITY_ERROR(dev);
1278 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1279 		dev_priv->l3_parity.which_slice |= 1 << 1;
1280 
1281 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1282 		dev_priv->l3_parity.which_slice |= 1 << 0;
1283 
1284 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1285 }
1286 
1287 static void ilk_gt_irq_handler(struct drm_device *dev,
1288 			       struct drm_i915_private *dev_priv,
1289 			       u32 gt_iir)
1290 {
1291 	if (gt_iir &
1292 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1293 		notify_ring(&dev_priv->ring[RCS]);
1294 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1295 		notify_ring(&dev_priv->ring[VCS]);
1296 }
1297 
1298 static void snb_gt_irq_handler(struct drm_device *dev,
1299 			       struct drm_i915_private *dev_priv,
1300 			       u32 gt_iir)
1301 {
1302 
1303 	if (gt_iir &
1304 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1305 		notify_ring(&dev_priv->ring[RCS]);
1306 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1307 		notify_ring(&dev_priv->ring[VCS]);
1308 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1309 		notify_ring(&dev_priv->ring[BCS]);
1310 
1311 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1312 		      GT_BSD_CS_ERROR_INTERRUPT |
1313 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1314 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1315 
1316 	if (gt_iir & GT_PARITY_ERROR(dev))
1317 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1318 }
1319 
1320 static __always_inline void
1321 gen8_cs_irq_handler(struct intel_engine_cs *ring, u32 iir, int test_shift)
1322 {
1323 	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
1324 		notify_ring(ring);
1325 	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
1326 		intel_lrc_irq_handler(ring);
1327 }
1328 
1329 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1330 				       u32 master_ctl)
1331 {
1332 
1333 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1334 		u32 iir = I915_READ_FW(GEN8_GT_IIR(0));
1335 		if (iir) {
1336 			I915_WRITE_FW(GEN8_GT_IIR(0), iir);
1337 
1338 			gen8_cs_irq_handler(&dev_priv->ring[RCS],
1339 					iir, GEN8_RCS_IRQ_SHIFT);
1340 
1341 			gen8_cs_irq_handler(&dev_priv->ring[BCS],
1342 					iir, GEN8_BCS_IRQ_SHIFT);
1343 		} else
1344 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1345 	}
1346 
1347 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1348 		u32 iir = I915_READ_FW(GEN8_GT_IIR(1));
1349 		if (iir) {
1350 			I915_WRITE_FW(GEN8_GT_IIR(1), iir);
1351 
1352 			gen8_cs_irq_handler(&dev_priv->ring[VCS],
1353 					iir, GEN8_VCS1_IRQ_SHIFT);
1354 
1355 			gen8_cs_irq_handler(&dev_priv->ring[VCS2],
1356 					iir, GEN8_VCS2_IRQ_SHIFT);
1357 		} else
1358 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1359 	}
1360 
1361 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1362 		u32 iir = I915_READ_FW(GEN8_GT_IIR(3));
1363 		if (iir) {
1364 			I915_WRITE_FW(GEN8_GT_IIR(3), iir);
1365 
1366 			gen8_cs_irq_handler(&dev_priv->ring[VECS],
1367 					iir, GEN8_VECS_IRQ_SHIFT);
1368 		} else
1369 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1370 	}
1371 
1372 	if (master_ctl & GEN8_GT_PM_IRQ) {
1373 		u32 iir = I915_READ_FW(GEN8_GT_IIR(2));
1374 		if (iir & dev_priv->pm_rps_events) {
1375 			I915_WRITE_FW(GEN8_GT_IIR(2),
1376 				      iir & dev_priv->pm_rps_events);
1377 			gen6_rps_irq_handler(dev_priv, iir);
1378 		} else
1379 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1380 	}
1381 
1382 }
1383 
1384 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1385 {
1386 	switch (port) {
1387 	case PORT_A:
1388 		return val & PORTA_HOTPLUG_LONG_DETECT;
1389 	case PORT_B:
1390 		return val & PORTB_HOTPLUG_LONG_DETECT;
1391 	case PORT_C:
1392 		return val & PORTC_HOTPLUG_LONG_DETECT;
1393 	default:
1394 		return false;
1395 	}
1396 }
1397 
1398 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1399 {
1400 	switch (port) {
1401 	case PORT_E:
1402 		return val & PORTE_HOTPLUG_LONG_DETECT;
1403 	default:
1404 		return false;
1405 	}
1406 }
1407 
1408 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1409 {
1410 	switch (port) {
1411 	case PORT_A:
1412 		return val & PORTA_HOTPLUG_LONG_DETECT;
1413 	case PORT_B:
1414 		return val & PORTB_HOTPLUG_LONG_DETECT;
1415 	case PORT_C:
1416 		return val & PORTC_HOTPLUG_LONG_DETECT;
1417 	case PORT_D:
1418 		return val & PORTD_HOTPLUG_LONG_DETECT;
1419 	default:
1420 		return false;
1421 	}
1422 }
1423 
1424 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1425 {
1426 	switch (port) {
1427 	case PORT_A:
1428 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1429 	default:
1430 		return false;
1431 	}
1432 }
1433 
1434 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1435 {
1436 	switch (port) {
1437 	case PORT_B:
1438 		return val & PORTB_HOTPLUG_LONG_DETECT;
1439 	case PORT_C:
1440 		return val & PORTC_HOTPLUG_LONG_DETECT;
1441 	case PORT_D:
1442 		return val & PORTD_HOTPLUG_LONG_DETECT;
1443 	default:
1444 		return false;
1445 	}
1446 }
1447 
1448 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1449 {
1450 	switch (port) {
1451 	case PORT_B:
1452 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1453 	case PORT_C:
1454 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1455 	case PORT_D:
1456 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1457 	default:
1458 		return false;
1459 	}
1460 }
1461 
1462 /*
1463  * Get a bit mask of pins that have triggered, and which ones may be long.
1464  * This can be called multiple times with the same masks to accumulate
1465  * hotplug detection results from several registers.
1466  *
1467  * Note that the caller is expected to zero out the masks initially.
1468  */
1469 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1470 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1471 			     const u32 hpd[HPD_NUM_PINS],
1472 			     bool long_pulse_detect(enum port port, u32 val))
1473 {
1474 	enum port port;
1475 	int i;
1476 
1477 	for_each_hpd_pin(i) {
1478 		if ((hpd[i] & hotplug_trigger) == 0)
1479 			continue;
1480 
1481 		*pin_mask |= BIT(i);
1482 
1483 		if (!intel_hpd_pin_to_port(i, &port))
1484 			continue;
1485 
1486 		if (long_pulse_detect(port, dig_hotplug_reg))
1487 			*long_mask |= BIT(i);
1488 	}
1489 
1490 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1491 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1492 
1493 }
1494 
1495 static void gmbus_irq_handler(struct drm_device *dev)
1496 {
1497 	struct drm_i915_private *dev_priv = dev->dev_private;
1498 
1499 	wake_up_all(&dev_priv->gmbus_wait_queue);
1500 }
1501 
1502 static void dp_aux_irq_handler(struct drm_device *dev)
1503 {
1504 	struct drm_i915_private *dev_priv = dev->dev_private;
1505 
1506 	wake_up_all(&dev_priv->gmbus_wait_queue);
1507 }
1508 
1509 #if defined(CONFIG_DEBUG_FS)
1510 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1511 					 uint32_t crc0, uint32_t crc1,
1512 					 uint32_t crc2, uint32_t crc3,
1513 					 uint32_t crc4)
1514 {
1515 	struct drm_i915_private *dev_priv = dev->dev_private;
1516 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1517 	struct intel_pipe_crc_entry *entry;
1518 	int head, tail;
1519 
1520 	spin_lock(&pipe_crc->lock);
1521 
1522 	if (!pipe_crc->entries) {
1523 		spin_unlock(&pipe_crc->lock);
1524 		DRM_DEBUG_KMS("spurious interrupt\n");
1525 		return;
1526 	}
1527 
1528 	head = pipe_crc->head;
1529 	tail = pipe_crc->tail;
1530 
1531 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1532 		spin_unlock(&pipe_crc->lock);
1533 		DRM_ERROR("CRC buffer overflowing\n");
1534 		return;
1535 	}
1536 
1537 	entry = &pipe_crc->entries[head];
1538 
1539 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1540 	entry->crc[0] = crc0;
1541 	entry->crc[1] = crc1;
1542 	entry->crc[2] = crc2;
1543 	entry->crc[3] = crc3;
1544 	entry->crc[4] = crc4;
1545 
1546 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1547 	pipe_crc->head = head;
1548 
1549 	spin_unlock(&pipe_crc->lock);
1550 
1551 	wake_up_interruptible(&pipe_crc->wq);
1552 }
1553 #else
1554 static inline void
1555 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1556 			     uint32_t crc0, uint32_t crc1,
1557 			     uint32_t crc2, uint32_t crc3,
1558 			     uint32_t crc4) {}
1559 #endif
1560 
1561 
1562 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1563 {
1564 	struct drm_i915_private *dev_priv = dev->dev_private;
1565 
1566 	display_pipe_crc_irq_handler(dev, pipe,
1567 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1568 				     0, 0, 0, 0);
1569 }
1570 
1571 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1572 {
1573 	struct drm_i915_private *dev_priv = dev->dev_private;
1574 
1575 	display_pipe_crc_irq_handler(dev, pipe,
1576 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1577 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1578 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1579 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1580 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1581 }
1582 
1583 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1584 {
1585 	struct drm_i915_private *dev_priv = dev->dev_private;
1586 	uint32_t res1, res2;
1587 
1588 	if (INTEL_INFO(dev)->gen >= 3)
1589 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1590 	else
1591 		res1 = 0;
1592 
1593 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1594 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1595 	else
1596 		res2 = 0;
1597 
1598 	display_pipe_crc_irq_handler(dev, pipe,
1599 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1600 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1601 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1602 				     res1, res2);
1603 }
1604 
1605 /* The RPS events need forcewake, so we add them to a work queue and mask their
1606  * IMR bits until the work is done. Other interrupts can be processed without
1607  * the work queue. */
1608 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1609 {
1610 	if (pm_iir & dev_priv->pm_rps_events) {
1611 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1612 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1613 		if (dev_priv->rps.interrupts_enabled) {
1614 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1615 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1616 		}
1617 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1618 	}
1619 
1620 	if (INTEL_INFO(dev_priv)->gen >= 8)
1621 		return;
1622 
1623 	if (HAS_VEBOX(dev_priv->dev)) {
1624 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1625 			notify_ring(&dev_priv->ring[VECS]);
1626 
1627 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1628 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1629 	}
1630 }
1631 
1632 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1633 {
1634 	if (!drm_handle_vblank(dev, pipe))
1635 		return false;
1636 
1637 	return true;
1638 }
1639 
1640 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1641 {
1642 	struct drm_i915_private *dev_priv = dev->dev_private;
1643 	u32 pipe_stats[I915_MAX_PIPES] = { };
1644 	int pipe;
1645 
1646 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1647 	for_each_pipe(dev_priv, pipe) {
1648 		i915_reg_t reg;
1649 		u32 mask, iir_bit = 0;
1650 
1651 		/*
1652 		 * PIPESTAT bits get signalled even when the interrupt is
1653 		 * disabled with the mask bits, and some of the status bits do
1654 		 * not generate interrupts at all (like the underrun bit). Hence
1655 		 * we need to be careful that we only handle what we want to
1656 		 * handle.
1657 		 */
1658 
1659 		/* fifo underruns are filterered in the underrun handler. */
1660 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1661 
1662 		switch (pipe) {
1663 		case PIPE_A:
1664 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1665 			break;
1666 		case PIPE_B:
1667 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1668 			break;
1669 		case PIPE_C:
1670 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1671 			break;
1672 		}
1673 		if (iir & iir_bit)
1674 			mask |= dev_priv->pipestat_irq_mask[pipe];
1675 
1676 		if (!mask)
1677 			continue;
1678 
1679 		reg = PIPESTAT(pipe);
1680 		mask |= PIPESTAT_INT_ENABLE_MASK;
1681 		pipe_stats[pipe] = I915_READ(reg) & mask;
1682 
1683 		/*
1684 		 * Clear the PIPE*STAT regs before the IIR
1685 		 */
1686 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1687 					PIPESTAT_INT_STATUS_MASK))
1688 			I915_WRITE(reg, pipe_stats[pipe]);
1689 	}
1690 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1691 
1692 	for_each_pipe(dev_priv, pipe) {
1693 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1694 		    intel_pipe_handle_vblank(dev, pipe))
1695 			intel_check_page_flip(dev, pipe);
1696 
1697 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1698 			intel_prepare_page_flip(dev, pipe);
1699 			intel_finish_page_flip(dev, pipe);
1700 		}
1701 
1702 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1703 			i9xx_pipe_crc_irq_handler(dev, pipe);
1704 
1705 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1706 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1707 	}
1708 
1709 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1710 		gmbus_irq_handler(dev);
1711 }
1712 
1713 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1714 {
1715 	struct drm_i915_private *dev_priv = dev->dev_private;
1716 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1717 	u32 pin_mask = 0, long_mask = 0;
1718 
1719 	if (!hotplug_status)
1720 		return;
1721 
1722 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1723 	/*
1724 	 * Make sure hotplug status is cleared before we clear IIR, or else we
1725 	 * may miss hotplug events.
1726 	 */
1727 	POSTING_READ(PORT_HOTPLUG_STAT);
1728 
1729 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
1730 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1731 
1732 		if (hotplug_trigger) {
1733 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1734 					   hotplug_trigger, hpd_status_g4x,
1735 					   i9xx_port_hotplug_long_detect);
1736 
1737 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1738 		}
1739 
1740 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1741 			dp_aux_irq_handler(dev);
1742 	} else {
1743 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1744 
1745 		if (hotplug_trigger) {
1746 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1747 					   hotplug_trigger, hpd_status_i915,
1748 					   i9xx_port_hotplug_long_detect);
1749 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1750 		}
1751 	}
1752 }
1753 
1754 static irqreturn_t valleyview_irq_handler(void *arg)
1755 {
1756 	struct drm_device *dev = arg;
1757 	struct drm_i915_private *dev_priv = dev->dev_private;
1758 	u32 iir, gt_iir, pm_iir;
1759 
1760 	if (!intel_irqs_enabled(dev_priv))
1761 		return IRQ_NONE;
1762 
1763 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1764 	disable_rpm_wakeref_asserts(dev_priv);
1765 
1766 	while (true) {
1767 		/* Find, clear, then process each source of interrupt */
1768 
1769 		gt_iir = I915_READ(GTIIR);
1770 		if (gt_iir)
1771 			I915_WRITE(GTIIR, gt_iir);
1772 
1773 		pm_iir = I915_READ(GEN6_PMIIR);
1774 		if (pm_iir)
1775 			I915_WRITE(GEN6_PMIIR, pm_iir);
1776 
1777 		iir = I915_READ(VLV_IIR);
1778 		if (iir) {
1779 			/* Consume port before clearing IIR or we'll miss events */
1780 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1781 				i9xx_hpd_irq_handler(dev);
1782 			I915_WRITE(VLV_IIR, iir);
1783 		}
1784 
1785 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1786 			goto out;
1787 
1788 
1789 		if (gt_iir)
1790 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1791 		if (pm_iir)
1792 			gen6_rps_irq_handler(dev_priv, pm_iir);
1793 		/* Call regardless, as some status bits might not be
1794 		 * signalled in iir */
1795 		valleyview_pipestat_irq_handler(dev, iir);
1796 	}
1797 
1798 out:
1799 	enable_rpm_wakeref_asserts(dev_priv);
1800 
1801 	return;
1802 }
1803 
1804 static irqreturn_t cherryview_irq_handler(void *arg)
1805 {
1806 	struct drm_device *dev = arg;
1807 	struct drm_i915_private *dev_priv = dev->dev_private;
1808 	u32 master_ctl, iir;
1809 
1810 	if (!intel_irqs_enabled(dev_priv))
1811 		return IRQ_NONE;
1812 
1813 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
1814 	disable_rpm_wakeref_asserts(dev_priv);
1815 
1816 	for (;;) {
1817 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1818 		iir = I915_READ(VLV_IIR);
1819 
1820 		if (master_ctl == 0 && iir == 0)
1821 			break;
1822 
1823 
1824 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1825 
1826 		/* Find, clear, then process each source of interrupt */
1827 
1828 		if (iir) {
1829 			/* Consume port before clearing IIR or we'll miss events */
1830 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1831 				i9xx_hpd_irq_handler(dev);
1832 			I915_WRITE(VLV_IIR, iir);
1833 		}
1834 
1835 		gen8_gt_irq_handler(dev_priv, master_ctl);
1836 
1837 		/* Call regardless, as some status bits might not be
1838 		 * signalled in iir */
1839 		valleyview_pipestat_irq_handler(dev, iir);
1840 
1841 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1842 		POSTING_READ(GEN8_MASTER_IRQ);
1843 	}
1844 
1845 	enable_rpm_wakeref_asserts(dev_priv);
1846 }
1847 
1848 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1849 				const u32 hpd[HPD_NUM_PINS])
1850 {
1851 	struct drm_i915_private *dev_priv = to_i915(dev);
1852 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1853 
1854 	/*
1855 	 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1856 	 * unless we touch the hotplug register, even if hotplug_trigger is
1857 	 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1858 	 * errors.
1859 	 */
1860 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1861 	if (!hotplug_trigger) {
1862 		u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1863 			PORTD_HOTPLUG_STATUS_MASK |
1864 			PORTC_HOTPLUG_STATUS_MASK |
1865 			PORTB_HOTPLUG_STATUS_MASK;
1866 		dig_hotplug_reg &= ~mask;
1867 	}
1868 
1869 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1870 	if (!hotplug_trigger)
1871 		return;
1872 
1873 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1874 			   dig_hotplug_reg, hpd,
1875 			   pch_port_hotplug_long_detect);
1876 
1877 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
1878 }
1879 
1880 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1881 {
1882 	struct drm_i915_private *dev_priv = dev->dev_private;
1883 	int pipe;
1884 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1885 
1886 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1887 
1888 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1889 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1890 			       SDE_AUDIO_POWER_SHIFT);
1891 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1892 				 port_name(port));
1893 	}
1894 
1895 	if (pch_iir & SDE_AUX_MASK)
1896 		dp_aux_irq_handler(dev);
1897 
1898 	if (pch_iir & SDE_GMBUS)
1899 		gmbus_irq_handler(dev);
1900 
1901 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1902 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1903 
1904 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1905 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1906 
1907 	if (pch_iir & SDE_POISON)
1908 		DRM_ERROR("PCH poison interrupt\n");
1909 
1910 	if (pch_iir & SDE_FDI_MASK)
1911 		for_each_pipe(dev_priv, pipe)
1912 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1913 					 pipe_name(pipe),
1914 					 I915_READ(FDI_RX_IIR(pipe)));
1915 
1916 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1917 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1918 
1919 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1920 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1921 
1922 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1923 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1924 
1925 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1926 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1927 }
1928 
1929 static void ivb_err_int_handler(struct drm_device *dev)
1930 {
1931 	struct drm_i915_private *dev_priv = dev->dev_private;
1932 	u32 err_int = I915_READ(GEN7_ERR_INT);
1933 	enum i915_pipe pipe;
1934 
1935 	if (err_int & ERR_INT_POISON)
1936 		DRM_ERROR("Poison interrupt\n");
1937 
1938 	for_each_pipe(dev_priv, pipe) {
1939 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1940 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1941 
1942 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1943 			if (IS_IVYBRIDGE(dev))
1944 				ivb_pipe_crc_irq_handler(dev, pipe);
1945 			else
1946 				hsw_pipe_crc_irq_handler(dev, pipe);
1947 		}
1948 	}
1949 
1950 	I915_WRITE(GEN7_ERR_INT, err_int);
1951 }
1952 
1953 static void cpt_serr_int_handler(struct drm_device *dev)
1954 {
1955 	struct drm_i915_private *dev_priv = dev->dev_private;
1956 	u32 serr_int = I915_READ(SERR_INT);
1957 
1958 	if (serr_int & SERR_INT_POISON)
1959 		DRM_ERROR("PCH poison interrupt\n");
1960 
1961 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1962 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1963 
1964 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1965 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1966 
1967 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1968 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1969 
1970 	I915_WRITE(SERR_INT, serr_int);
1971 }
1972 
1973 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1974 {
1975 	struct drm_i915_private *dev_priv = dev->dev_private;
1976 	int pipe;
1977 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1978 
1979 	ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1980 
1981 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1982 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1983 			       SDE_AUDIO_POWER_SHIFT_CPT);
1984 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1985 				 port_name(port));
1986 	}
1987 
1988 	if (pch_iir & SDE_AUX_MASK_CPT)
1989 		dp_aux_irq_handler(dev);
1990 
1991 	if (pch_iir & SDE_GMBUS_CPT)
1992 		gmbus_irq_handler(dev);
1993 
1994 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1995 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1996 
1997 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1998 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1999 
2000 	if (pch_iir & SDE_FDI_MASK_CPT)
2001 		for_each_pipe(dev_priv, pipe)
2002 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
2003 					 pipe_name(pipe),
2004 					 I915_READ(FDI_RX_IIR(pipe)));
2005 
2006 	if (pch_iir & SDE_ERROR_CPT)
2007 		cpt_serr_int_handler(dev);
2008 }
2009 
2010 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2011 {
2012 	struct drm_i915_private *dev_priv = dev->dev_private;
2013 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2014 		~SDE_PORTE_HOTPLUG_SPT;
2015 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2016 	u32 pin_mask = 0, long_mask = 0;
2017 
2018 	if (hotplug_trigger) {
2019 		u32 dig_hotplug_reg;
2020 
2021 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2022 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2023 
2024 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2025 				   dig_hotplug_reg, hpd_spt,
2026 				   spt_port_hotplug_long_detect);
2027 	}
2028 
2029 	if (hotplug2_trigger) {
2030 		u32 dig_hotplug_reg;
2031 
2032 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2033 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2034 
2035 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2036 				   dig_hotplug_reg, hpd_spt,
2037 				   spt_port_hotplug2_long_detect);
2038 	}
2039 
2040 	if (pin_mask)
2041 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
2042 
2043 	if (pch_iir & SDE_GMBUS_CPT)
2044 		gmbus_irq_handler(dev);
2045 }
2046 
2047 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2048 				const u32 hpd[HPD_NUM_PINS])
2049 {
2050 	struct drm_i915_private *dev_priv = to_i915(dev);
2051 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2052 
2053 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2054 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2055 
2056 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2057 			   dig_hotplug_reg, hpd,
2058 			   ilk_port_hotplug_long_detect);
2059 
2060 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2061 }
2062 
2063 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2064 {
2065 	struct drm_i915_private *dev_priv = dev->dev_private;
2066 	enum i915_pipe pipe;
2067 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2068 
2069 	if (hotplug_trigger)
2070 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2071 
2072 	if (de_iir & DE_AUX_CHANNEL_A)
2073 		dp_aux_irq_handler(dev);
2074 
2075 	if (de_iir & DE_GSE)
2076 		intel_opregion_asle_intr(dev);
2077 
2078 	if (de_iir & DE_POISON)
2079 		DRM_ERROR("Poison interrupt\n");
2080 
2081 	for_each_pipe(dev_priv, pipe) {
2082 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2083 		    intel_pipe_handle_vblank(dev, pipe))
2084 			intel_check_page_flip(dev, pipe);
2085 
2086 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2087 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2088 
2089 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2090 			i9xx_pipe_crc_irq_handler(dev, pipe);
2091 
2092 		/* plane/pipes map 1:1 on ilk+ */
2093 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2094 			intel_prepare_page_flip(dev, pipe);
2095 			intel_finish_page_flip_plane(dev, pipe);
2096 		}
2097 	}
2098 
2099 	/* check event from PCH */
2100 	if (de_iir & DE_PCH_EVENT) {
2101 		u32 pch_iir = I915_READ(SDEIIR);
2102 
2103 		if (HAS_PCH_CPT(dev))
2104 			cpt_irq_handler(dev, pch_iir);
2105 		else
2106 			ibx_irq_handler(dev, pch_iir);
2107 
2108 		/* should clear PCH hotplug event before clear CPU irq */
2109 		I915_WRITE(SDEIIR, pch_iir);
2110 	}
2111 
2112 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2113 		ironlake_rps_change_irq_handler(dev);
2114 }
2115 
2116 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2117 {
2118 	struct drm_i915_private *dev_priv = dev->dev_private;
2119 	enum i915_pipe pipe;
2120 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2121 
2122 	if (hotplug_trigger)
2123 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2124 
2125 	if (de_iir & DE_ERR_INT_IVB)
2126 		ivb_err_int_handler(dev);
2127 
2128 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2129 		dp_aux_irq_handler(dev);
2130 
2131 	if (de_iir & DE_GSE_IVB)
2132 		intel_opregion_asle_intr(dev);
2133 
2134 	for_each_pipe(dev_priv, pipe) {
2135 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2136 		    intel_pipe_handle_vblank(dev, pipe))
2137 			intel_check_page_flip(dev, pipe);
2138 
2139 		/* plane/pipes map 1:1 on ilk+ */
2140 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2141 			intel_prepare_page_flip(dev, pipe);
2142 			intel_finish_page_flip_plane(dev, pipe);
2143 		}
2144 	}
2145 
2146 	/* check event from PCH */
2147 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2148 		u32 pch_iir = I915_READ(SDEIIR);
2149 
2150 		cpt_irq_handler(dev, pch_iir);
2151 
2152 		/* clear PCH hotplug event before clear CPU irq */
2153 		I915_WRITE(SDEIIR, pch_iir);
2154 	}
2155 }
2156 
2157 /*
2158  * To handle irqs with the minimum potential races with fresh interrupts, we:
2159  * 1 - Disable Master Interrupt Control.
2160  * 2 - Find the source(s) of the interrupt.
2161  * 3 - Clear the Interrupt Identity bits (IIR).
2162  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2163  * 5 - Re-enable Master Interrupt Control.
2164  */
2165 static irqreturn_t ironlake_irq_handler(void *arg)
2166 {
2167 	struct drm_device *dev = arg;
2168 	struct drm_i915_private *dev_priv = dev->dev_private;
2169 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2170 
2171 	if (!intel_irqs_enabled(dev_priv))
2172 		return IRQ_NONE;
2173 
2174 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2175 	disable_rpm_wakeref_asserts(dev_priv);
2176 
2177 	/* We get interrupts on unclaimed registers, so check for this before we
2178 	 * do any I915_{READ,WRITE}. */
2179 	intel_uncore_check_errors(dev);
2180 
2181 	/* disable master interrupt before clearing iir  */
2182 	de_ier = I915_READ(DEIER);
2183 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2184 	POSTING_READ(DEIER);
2185 
2186 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2187 	 * interrupts will will be stored on its back queue, and then we'll be
2188 	 * able to process them after we restore SDEIER (as soon as we restore
2189 	 * it, we'll get an interrupt if SDEIIR still has something to process
2190 	 * due to its back queue). */
2191 	if (!HAS_PCH_NOP(dev)) {
2192 		sde_ier = I915_READ(SDEIER);
2193 		I915_WRITE(SDEIER, 0);
2194 		POSTING_READ(SDEIER);
2195 	}
2196 
2197 	/* Find, clear, then process each source of interrupt */
2198 
2199 	gt_iir = I915_READ(GTIIR);
2200 	if (gt_iir) {
2201 		I915_WRITE(GTIIR, gt_iir);
2202 		if (INTEL_INFO(dev)->gen >= 6)
2203 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2204 		else
2205 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2206 	}
2207 
2208 	de_iir = I915_READ(DEIIR);
2209 	if (de_iir) {
2210 		I915_WRITE(DEIIR, de_iir);
2211 		if (INTEL_INFO(dev)->gen >= 7)
2212 			ivb_display_irq_handler(dev, de_iir);
2213 		else
2214 			ilk_display_irq_handler(dev, de_iir);
2215 	}
2216 
2217 	if (INTEL_INFO(dev)->gen >= 6) {
2218 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2219 		if (pm_iir) {
2220 			I915_WRITE(GEN6_PMIIR, pm_iir);
2221 			gen6_rps_irq_handler(dev_priv, pm_iir);
2222 		}
2223 	}
2224 
2225 	I915_WRITE(DEIER, de_ier);
2226 	POSTING_READ(DEIER);
2227 	if (!HAS_PCH_NOP(dev)) {
2228 		I915_WRITE(SDEIER, sde_ier);
2229 		POSTING_READ(SDEIER);
2230 	}
2231 
2232 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2233 	enable_rpm_wakeref_asserts(dev_priv);
2234 
2235 }
2236 
2237 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2238 				const u32 hpd[HPD_NUM_PINS])
2239 {
2240 	struct drm_i915_private *dev_priv = to_i915(dev);
2241 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2242 
2243 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2244 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2245 
2246 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2247 			   dig_hotplug_reg, hpd,
2248 			   bxt_port_hotplug_long_detect);
2249 
2250 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2251 }
2252 
2253 static irqreturn_t gen8_irq_handler(void *arg)
2254 {
2255 	struct drm_device *dev = arg;
2256 	struct drm_i915_private *dev_priv = dev->dev_private;
2257 	u32 master_ctl;
2258 	uint32_t tmp = 0;
2259 	enum i915_pipe pipe;
2260 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2261 
2262 	if (!intel_irqs_enabled(dev_priv))
2263 		return IRQ_NONE;
2264 
2265 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
2266 	disable_rpm_wakeref_asserts(dev_priv);
2267 
2268 	if (INTEL_INFO(dev_priv)->gen >= 9)
2269 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2270 			GEN9_AUX_CHANNEL_D;
2271 
2272 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2273 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2274 	if (!master_ctl)
2275 		goto out;
2276 
2277 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2278 
2279 	/* Find, clear, then process each source of interrupt */
2280 
2281 	gen8_gt_irq_handler(dev_priv, master_ctl);
2282 
2283 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2284 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2285 		if (tmp) {
2286 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2287 			if (tmp & GEN8_DE_MISC_GSE)
2288 				intel_opregion_asle_intr(dev);
2289 			else
2290 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2291 		}
2292 		else
2293 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2294 	}
2295 
2296 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2297 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2298 		if (tmp) {
2299 			bool found = false;
2300 			u32 hotplug_trigger = 0;
2301 
2302 			if (IS_BROXTON(dev_priv))
2303 				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2304 			else if (IS_BROADWELL(dev_priv))
2305 				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2306 
2307 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2308 
2309 			if (tmp & aux_mask) {
2310 				dp_aux_irq_handler(dev);
2311 				found = true;
2312 			}
2313 
2314 			if (hotplug_trigger) {
2315 				if (IS_BROXTON(dev))
2316 					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2317 				else
2318 					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2319 				found = true;
2320 			}
2321 
2322 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2323 				gmbus_irq_handler(dev);
2324 				found = true;
2325 			}
2326 
2327 			if (!found)
2328 				DRM_ERROR("Unexpected DE Port interrupt\n");
2329 		}
2330 		else
2331 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2332 	}
2333 
2334 	for_each_pipe(dev_priv, pipe) {
2335 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2336 
2337 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2338 			continue;
2339 
2340 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2341 		if (pipe_iir) {
2342 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2343 
2344 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2345 			    intel_pipe_handle_vblank(dev, pipe))
2346 				intel_check_page_flip(dev, pipe);
2347 
2348 			if (INTEL_INFO(dev_priv)->gen >= 9)
2349 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2350 			else
2351 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2352 
2353 			if (flip_done) {
2354 				intel_prepare_page_flip(dev, pipe);
2355 				intel_finish_page_flip_plane(dev, pipe);
2356 			}
2357 
2358 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2359 				hsw_pipe_crc_irq_handler(dev, pipe);
2360 
2361 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2362 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2363 								    pipe);
2364 
2365 
2366 			if (INTEL_INFO(dev_priv)->gen >= 9)
2367 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2368 			else
2369 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2370 
2371 			if (fault_errors)
2372 				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2373 					  pipe_name(pipe),
2374 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2375 		} else
2376 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2377 	}
2378 
2379 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2380 	    master_ctl & GEN8_DE_PCH_IRQ) {
2381 		/*
2382 		 * FIXME(BDW): Assume for now that the new interrupt handling
2383 		 * scheme also closed the SDE interrupt handling race we've seen
2384 		 * on older pch-split platforms. But this needs testing.
2385 		 */
2386 		u32 pch_iir = I915_READ(SDEIIR);
2387 		if (pch_iir) {
2388 			I915_WRITE(SDEIIR, pch_iir);
2389 
2390 			if (HAS_PCH_SPT(dev_priv))
2391 				spt_irq_handler(dev, pch_iir);
2392 			else
2393 				cpt_irq_handler(dev, pch_iir);
2394 		} else {
2395 			/*
2396 			 * Like on previous PCH there seems to be something
2397 			 * fishy going on with forwarding PCH interrupts.
2398 			 */
2399 			DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2400 		}
2401 	}
2402 
2403 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2404 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2405 
2406 out:
2407 	enable_rpm_wakeref_asserts(dev_priv);
2408 
2409 }
2410 
2411 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2412 			       bool reset_completed)
2413 {
2414 	struct intel_engine_cs *ring;
2415 	int i;
2416 
2417 	/*
2418 	 * Notify all waiters for GPU completion events that reset state has
2419 	 * been changed, and that they need to restart their wait after
2420 	 * checking for potential errors (and bail out to drop locks if there is
2421 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2422 	 */
2423 
2424 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2425 	for_each_ring(ring, dev_priv, i)
2426 		wake_up_all(&ring->irq_queue);
2427 
2428 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2429 	wake_up_all(&dev_priv->pending_flip_queue);
2430 
2431 	/*
2432 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2433 	 * reset state is cleared.
2434 	 */
2435 	if (reset_completed)
2436 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2437 }
2438 
2439 /**
2440  * i915_reset_and_wakeup - do process context error handling work
2441  * @dev: drm device
2442  *
2443  * Fire an error uevent so userspace can see that a hang or error
2444  * was detected.
2445  */
2446 static void i915_reset_and_wakeup(struct drm_device *dev)
2447 {
2448 	struct drm_i915_private *dev_priv = to_i915(dev);
2449 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2450 #if 0
2451 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2452 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2453 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2454 #endif
2455 	int ret;
2456 
2457 #if 0
2458 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2459 #endif
2460 
2461 	/*
2462 	 * Note that there's only one work item which does gpu resets, so we
2463 	 * need not worry about concurrent gpu resets potentially incrementing
2464 	 * error->reset_counter twice. We only need to take care of another
2465 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2466 	 * quick check for that is good enough: schedule_work ensures the
2467 	 * correct ordering between hang detection and this work item, and since
2468 	 * the reset in-progress bit is only ever set by code outside of this
2469 	 * work we don't need to worry about any other races.
2470 	 */
2471 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2472 		DRM_DEBUG_DRIVER("resetting chip\n");
2473 #if 0
2474 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2475 				   reset_event);
2476 #endif
2477 
2478 		/*
2479 		 * In most cases it's guaranteed that we get here with an RPM
2480 		 * reference held, for example because there is a pending GPU
2481 		 * request that won't finish until the reset is done. This
2482 		 * isn't the case at least when we get here by doing a
2483 		 * simulated reset via debugs, so get an RPM reference.
2484 		 */
2485 		intel_runtime_pm_get(dev_priv);
2486 
2487 		intel_prepare_reset(dev);
2488 
2489 		/*
2490 		 * All state reset _must_ be completed before we update the
2491 		 * reset counter, for otherwise waiters might miss the reset
2492 		 * pending state and not properly drop locks, resulting in
2493 		 * deadlocks with the reset work.
2494 		 */
2495 		ret = i915_reset(dev);
2496 
2497 		intel_finish_reset(dev);
2498 
2499 		intel_runtime_pm_put(dev_priv);
2500 
2501 		if (ret == 0) {
2502 			/*
2503 			 * After all the gem state is reset, increment the reset
2504 			 * counter and wake up everyone waiting for the reset to
2505 			 * complete.
2506 			 *
2507 			 * Since unlock operations are a one-sided barrier only,
2508 			 * we need to insert a barrier here to order any seqno
2509 			 * updates before
2510 			 * the counter increment.
2511 			 */
2512 			smp_mb__before_atomic();
2513 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2514 
2515 #if 0
2516 			kobject_uevent_env(&dev->primary->kdev->kobj,
2517 					   KOBJ_CHANGE, reset_done_event);
2518 #endif
2519 		} else {
2520 			atomic_or(I915_WEDGED, &error->reset_counter);
2521 		}
2522 
2523 		/*
2524 		 * Note: The wake_up also serves as a memory barrier so that
2525 		 * waiters see the update value of the reset counter atomic_t.
2526 		 */
2527 		i915_error_wake_up(dev_priv, true);
2528 	}
2529 }
2530 
2531 static void i915_report_and_clear_eir(struct drm_device *dev)
2532 {
2533 	struct drm_i915_private *dev_priv = dev->dev_private;
2534 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2535 	u32 eir = I915_READ(EIR);
2536 	int pipe, i;
2537 
2538 	if (!eir)
2539 		return;
2540 
2541 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2542 
2543 #if 0
2544 	i915_get_extra_instdone(dev, instdone);
2545 #endif
2546 
2547 	if (IS_G4X(dev)) {
2548 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2549 			u32 ipeir = I915_READ(IPEIR_I965);
2550 
2551 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2552 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2553 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2554 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2555 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2556 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2557 			I915_WRITE(IPEIR_I965, ipeir);
2558 			POSTING_READ(IPEIR_I965);
2559 		}
2560 		if (eir & GM45_ERROR_PAGE_TABLE) {
2561 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2562 			pr_err("page table error\n");
2563 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2564 			I915_WRITE(PGTBL_ER, pgtbl_err);
2565 			POSTING_READ(PGTBL_ER);
2566 		}
2567 	}
2568 
2569 	if (!IS_GEN2(dev)) {
2570 		if (eir & I915_ERROR_PAGE_TABLE) {
2571 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2572 			pr_err("page table error\n");
2573 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2574 			I915_WRITE(PGTBL_ER, pgtbl_err);
2575 			POSTING_READ(PGTBL_ER);
2576 		}
2577 	}
2578 
2579 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2580 		pr_err("memory refresh error:\n");
2581 		for_each_pipe(dev_priv, pipe)
2582 			pr_err("pipe %c stat: 0x%08x\n",
2583 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2584 		/* pipestat has already been acked */
2585 	}
2586 	if (eir & I915_ERROR_INSTRUCTION) {
2587 		pr_err("instruction error\n");
2588 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2589 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2590 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2591 		if (INTEL_INFO(dev)->gen < 4) {
2592 			u32 ipeir = I915_READ(IPEIR);
2593 
2594 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2595 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2596 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2597 			I915_WRITE(IPEIR, ipeir);
2598 			POSTING_READ(IPEIR);
2599 		} else {
2600 			u32 ipeir = I915_READ(IPEIR_I965);
2601 
2602 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2603 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2604 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2605 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2606 			I915_WRITE(IPEIR_I965, ipeir);
2607 			POSTING_READ(IPEIR_I965);
2608 		}
2609 	}
2610 
2611 	I915_WRITE(EIR, eir);
2612 	POSTING_READ(EIR);
2613 	eir = I915_READ(EIR);
2614 	if (eir) {
2615 		/*
2616 		 * some errors might have become stuck,
2617 		 * mask them.
2618 		 */
2619 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2620 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2621 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2622 	}
2623 }
2624 
2625 /**
2626  * i915_handle_error - handle a gpu error
2627  * @dev: drm device
2628  *
2629  * Do some basic checking of register state at error time and
2630  * dump it to the syslog.  Also call i915_capture_error_state() to make
2631  * sure we get a record and make it available in debugfs.  Fire a uevent
2632  * so userspace knows something bad happened (should trigger collection
2633  * of a ring dump etc.).
2634  */
2635 void i915_handle_error(struct drm_device *dev, bool wedged,
2636 		       const char *fmt, ...)
2637 {
2638 	struct drm_i915_private *dev_priv = dev->dev_private;
2639 #if 0
2640 	va_list args;
2641 	char error_msg[80];
2642 
2643 	va_start(args, fmt);
2644 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2645 	va_end(args);
2646 
2647 	i915_capture_error_state(dev, wedged, error_msg);
2648 #endif
2649 	i915_report_and_clear_eir(dev);
2650 
2651 	if (wedged) {
2652 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2653 				&dev_priv->gpu_error.reset_counter);
2654 
2655 		/*
2656 		 * Wakeup waiting processes so that the reset function
2657 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2658 		 * various locks. By bumping the reset counter first, the woken
2659 		 * processes will see a reset in progress and back off,
2660 		 * releasing their locks and then wait for the reset completion.
2661 		 * We must do this for _all_ gpu waiters that might hold locks
2662 		 * that the reset work needs to acquire.
2663 		 *
2664 		 * Note: The wake_up serves as the required memory barrier to
2665 		 * ensure that the waiters see the updated value of the reset
2666 		 * counter atomic_t.
2667 		 */
2668 		i915_error_wake_up(dev_priv, false);
2669 	}
2670 
2671 	i915_reset_and_wakeup(dev);
2672 }
2673 
2674 /* Called from drm generic code, passed 'crtc' which
2675  * we use as a pipe index
2676  */
2677 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2678 {
2679 	struct drm_i915_private *dev_priv = dev->dev_private;
2680 	unsigned long irqflags;
2681 
2682 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2683 	if (INTEL_INFO(dev)->gen >= 4)
2684 		i915_enable_pipestat(dev_priv, pipe,
2685 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2686 	else
2687 		i915_enable_pipestat(dev_priv, pipe,
2688 				     PIPE_VBLANK_INTERRUPT_STATUS);
2689 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2690 
2691 	return 0;
2692 }
2693 
2694 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2695 {
2696 	struct drm_i915_private *dev_priv = dev->dev_private;
2697 	unsigned long irqflags;
2698 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2699 						     DE_PIPE_VBLANK(pipe);
2700 
2701 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2702 	ilk_enable_display_irq(dev_priv, bit);
2703 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2704 
2705 	return 0;
2706 }
2707 
2708 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2709 {
2710 	struct drm_i915_private *dev_priv = dev->dev_private;
2711 	unsigned long irqflags;
2712 
2713 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2714 	i915_enable_pipestat(dev_priv, pipe,
2715 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2716 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2717 
2718 	return 0;
2719 }
2720 
2721 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2722 {
2723 	struct drm_i915_private *dev_priv = dev->dev_private;
2724 	unsigned long irqflags;
2725 
2726 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2727 	bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2728 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2729 
2730 	return 0;
2731 }
2732 
2733 /* Called from drm generic code, passed 'crtc' which
2734  * we use as a pipe index
2735  */
2736 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2737 {
2738 	struct drm_i915_private *dev_priv = dev->dev_private;
2739 	unsigned long irqflags;
2740 
2741 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2742 	i915_disable_pipestat(dev_priv, pipe,
2743 			      PIPE_VBLANK_INTERRUPT_STATUS |
2744 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2745 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2746 }
2747 
2748 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2749 {
2750 	struct drm_i915_private *dev_priv = dev->dev_private;
2751 	unsigned long irqflags;
2752 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2753 						     DE_PIPE_VBLANK(pipe);
2754 
2755 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2756 	ilk_disable_display_irq(dev_priv, bit);
2757 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2758 }
2759 
2760 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2761 {
2762 	struct drm_i915_private *dev_priv = dev->dev_private;
2763 	unsigned long irqflags;
2764 
2765 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2766 	i915_disable_pipestat(dev_priv, pipe,
2767 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2768 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2769 }
2770 
2771 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2772 {
2773 	struct drm_i915_private *dev_priv = dev->dev_private;
2774 	unsigned long irqflags;
2775 
2776 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2777 	bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2778 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2779 }
2780 
2781 static bool
2782 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2783 {
2784 	return (list_empty(&ring->request_list) ||
2785 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2786 }
2787 
2788 static bool
2789 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2790 {
2791 	if (INTEL_INFO(dev)->gen >= 8) {
2792 		return (ipehr >> 23) == 0x1c;
2793 	} else {
2794 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2795 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2796 				 MI_SEMAPHORE_REGISTER);
2797 	}
2798 }
2799 
2800 static struct intel_engine_cs *
2801 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2802 {
2803 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2804 	struct intel_engine_cs *signaller;
2805 	int i;
2806 
2807 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2808 		for_each_ring(signaller, dev_priv, i) {
2809 			if (ring == signaller)
2810 				continue;
2811 
2812 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2813 				return signaller;
2814 		}
2815 	} else {
2816 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2817 
2818 		for_each_ring(signaller, dev_priv, i) {
2819 			if(ring == signaller)
2820 				continue;
2821 
2822 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2823 				return signaller;
2824 		}
2825 	}
2826 
2827 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
2828 		  ring->id, ipehr, offset);
2829 
2830 	return NULL;
2831 }
2832 
2833 static struct intel_engine_cs *
2834 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2835 {
2836 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2837 	u32 cmd, ipehr, head;
2838 	u64 offset = 0;
2839 	int i, backwards;
2840 
2841 	/*
2842 	 * This function does not support execlist mode - any attempt to
2843 	 * proceed further into this function will result in a kernel panic
2844 	 * when dereferencing ring->buffer, which is not set up in execlist
2845 	 * mode.
2846 	 *
2847 	 * The correct way of doing it would be to derive the currently
2848 	 * executing ring buffer from the current context, which is derived
2849 	 * from the currently running request. Unfortunately, to get the
2850 	 * current request we would have to grab the struct_mutex before doing
2851 	 * anything else, which would be ill-advised since some other thread
2852 	 * might have grabbed it already and managed to hang itself, causing
2853 	 * the hang checker to deadlock.
2854 	 *
2855 	 * Therefore, this function does not support execlist mode in its
2856 	 * current form. Just return NULL and move on.
2857 	 */
2858 	if (ring->buffer == NULL)
2859 		return NULL;
2860 
2861 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2862 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2863 		return NULL;
2864 
2865 	/*
2866 	 * HEAD is likely pointing to the dword after the actual command,
2867 	 * so scan backwards until we find the MBOX. But limit it to just 3
2868 	 * or 4 dwords depending on the semaphore wait command size.
2869 	 * Note that we don't care about ACTHD here since that might
2870 	 * point at at batch, and semaphores are always emitted into the
2871 	 * ringbuffer itself.
2872 	 */
2873 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2874 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2875 
2876 	for (i = backwards; i; --i) {
2877 		/*
2878 		 * Be paranoid and presume the hw has gone off into the wild -
2879 		 * our ring is smaller than what the hardware (and hence
2880 		 * HEAD_ADDR) allows. Also handles wrap-around.
2881 		 */
2882 		head &= ring->buffer->size - 1;
2883 
2884 		/* This here seems to blow up */
2885 		cmd = ioread32(ring->buffer->virtual_start + head);
2886 		if (cmd == ipehr)
2887 			break;
2888 
2889 		head -= 4;
2890 	}
2891 
2892 	if (!i)
2893 		return NULL;
2894 
2895 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2896 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2897 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2898 		offset <<= 32;
2899 		offset = ioread32(ring->buffer->virtual_start + head + 8);
2900 	}
2901 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2902 }
2903 
2904 static int semaphore_passed(struct intel_engine_cs *ring)
2905 {
2906 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2907 	struct intel_engine_cs *signaller;
2908 	u32 seqno;
2909 
2910 	ring->hangcheck.deadlock++;
2911 
2912 	signaller = semaphore_waits_for(ring, &seqno);
2913 	if (signaller == NULL)
2914 		return -1;
2915 
2916 	/* Prevent pathological recursion due to driver bugs */
2917 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2918 		return -1;
2919 
2920 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2921 		return 1;
2922 
2923 	/* cursory check for an unkickable deadlock */
2924 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2925 	    semaphore_passed(signaller) < 0)
2926 		return -1;
2927 
2928 	return 0;
2929 }
2930 
2931 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2932 {
2933 	struct intel_engine_cs *ring;
2934 	int i;
2935 
2936 	for_each_ring(ring, dev_priv, i)
2937 		ring->hangcheck.deadlock = 0;
2938 }
2939 
2940 static enum intel_ring_hangcheck_action
2941 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2942 {
2943 	struct drm_device *dev = ring->dev;
2944 	struct drm_i915_private *dev_priv = dev->dev_private;
2945 	u32 tmp;
2946 
2947 	if (acthd != ring->hangcheck.acthd) {
2948 		if (acthd > ring->hangcheck.max_acthd) {
2949 			ring->hangcheck.max_acthd = acthd;
2950 			return HANGCHECK_ACTIVE;
2951 		}
2952 
2953 		return HANGCHECK_ACTIVE_LOOP;
2954 	}
2955 
2956 	if (IS_GEN2(dev))
2957 		return HANGCHECK_HUNG;
2958 
2959 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2960 	 * If so we can simply poke the RB_WAIT bit
2961 	 * and break the hang. This should work on
2962 	 * all but the second generation chipsets.
2963 	 */
2964 	tmp = I915_READ_CTL(ring);
2965 	if (tmp & RING_WAIT) {
2966 		i915_handle_error(dev, false,
2967 				  "Kicking stuck wait on %s",
2968 				  ring->name);
2969 		I915_WRITE_CTL(ring, tmp);
2970 		return HANGCHECK_KICK;
2971 	}
2972 
2973 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2974 		switch (semaphore_passed(ring)) {
2975 		default:
2976 			return HANGCHECK_HUNG;
2977 		case 1:
2978 			i915_handle_error(dev, false,
2979 					  "Kicking stuck semaphore on %s",
2980 					  ring->name);
2981 			I915_WRITE_CTL(ring, tmp);
2982 			return HANGCHECK_KICK;
2983 		case 0:
2984 			return HANGCHECK_WAIT;
2985 		}
2986 	}
2987 
2988 	return HANGCHECK_HUNG;
2989 }
2990 
2991 /*
2992  * This is called when the chip hasn't reported back with completed
2993  * batchbuffers in a long time. We keep track per ring seqno progress and
2994  * if there are no progress, hangcheck score for that ring is increased.
2995  * Further, acthd is inspected to see if the ring is stuck. On stuck case
2996  * we kick the ring. If we see no progress on three subsequent calls
2997  * we assume chip is wedged and try to fix it by resetting the chip.
2998  */
2999 static void i915_hangcheck_elapsed(struct work_struct *work)
3000 {
3001 	struct drm_i915_private *dev_priv =
3002 		container_of(work, typeof(*dev_priv),
3003 			     gpu_error.hangcheck_work.work);
3004 	struct drm_device *dev = dev_priv->dev;
3005 	struct intel_engine_cs *ring;
3006 	int i;
3007 	int busy_count = 0, rings_hung = 0;
3008 	bool stuck[I915_NUM_RINGS] = { 0 };
3009 #define BUSY 1
3010 #define KICK 5
3011 #define HUNG 20
3012 
3013 	if (!i915.enable_hangcheck)
3014 		return;
3015 
3016 	/*
3017 	 * The hangcheck work is synced during runtime suspend, we don't
3018 	 * require a wakeref. TODO: instead of disabling the asserts make
3019 	 * sure that we hold a reference when this work is running.
3020 	 */
3021 	DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3022 
3023 	for_each_ring(ring, dev_priv, i) {
3024 		u64 acthd;
3025 		u32 seqno;
3026 		bool busy = true;
3027 
3028 		semaphore_clear_deadlocks(dev_priv);
3029 
3030 		seqno = ring->get_seqno(ring, false);
3031 		acthd = intel_ring_get_active_head(ring);
3032 
3033 		if (ring->hangcheck.seqno == seqno) {
3034 			if (ring_idle(ring, seqno)) {
3035 				ring->hangcheck.action = HANGCHECK_IDLE;
3036 
3037 				if (waitqueue_active(&ring->irq_queue)) {
3038 					/* Issue a wake-up to catch stuck h/w. */
3039 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3040 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3041 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3042 								  ring->name);
3043 						else
3044 							DRM_INFO("Fake missed irq on %s\n",
3045 								 ring->name);
3046 						wake_up_all(&ring->irq_queue);
3047 					}
3048 					/* Safeguard against driver failure */
3049 					ring->hangcheck.score += BUSY;
3050 				} else
3051 					busy = false;
3052 			} else {
3053 				/* We always increment the hangcheck score
3054 				 * if the ring is busy and still processing
3055 				 * the same request, so that no single request
3056 				 * can run indefinitely (such as a chain of
3057 				 * batches). The only time we do not increment
3058 				 * the hangcheck score on this ring, if this
3059 				 * ring is in a legitimate wait for another
3060 				 * ring. In that case the waiting ring is a
3061 				 * victim and we want to be sure we catch the
3062 				 * right culprit. Then every time we do kick
3063 				 * the ring, add a small increment to the
3064 				 * score so that we can catch a batch that is
3065 				 * being repeatedly kicked and so responsible
3066 				 * for stalling the machine.
3067 				 */
3068 				ring->hangcheck.action = ring_stuck(ring,
3069 								    acthd);
3070 
3071 				switch (ring->hangcheck.action) {
3072 				case HANGCHECK_IDLE:
3073 				case HANGCHECK_WAIT:
3074 				case HANGCHECK_ACTIVE:
3075 					break;
3076 				case HANGCHECK_ACTIVE_LOOP:
3077 					ring->hangcheck.score += BUSY;
3078 					break;
3079 				case HANGCHECK_KICK:
3080 					ring->hangcheck.score += KICK;
3081 					break;
3082 				case HANGCHECK_HUNG:
3083 					ring->hangcheck.score += HUNG;
3084 					stuck[i] = true;
3085 					break;
3086 				}
3087 			}
3088 		} else {
3089 			ring->hangcheck.action = HANGCHECK_ACTIVE;
3090 
3091 			/* Gradually reduce the count so that we catch DoS
3092 			 * attempts across multiple batches.
3093 			 */
3094 			if (ring->hangcheck.score > 0)
3095 				ring->hangcheck.score--;
3096 
3097 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3098 		}
3099 
3100 		ring->hangcheck.seqno = seqno;
3101 		ring->hangcheck.acthd = acthd;
3102 		busy_count += busy;
3103 	}
3104 
3105 	for_each_ring(ring, dev_priv, i) {
3106 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3107 			DRM_INFO("%s on %s\n",
3108 				 stuck[i] ? "stuck" : "no progress",
3109 				 ring->name);
3110 			rings_hung++;
3111 		}
3112 	}
3113 
3114 	if (rings_hung) {
3115 		i915_handle_error(dev, true, "Ring hung");
3116 		goto out;
3117 	}
3118 
3119 	if (busy_count)
3120 		/* Reset timer case chip hangs without another request
3121 		 * being added */
3122 		i915_queue_hangcheck(dev);
3123 
3124 out:
3125 	ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3126 }
3127 
3128 void i915_queue_hangcheck(struct drm_device *dev)
3129 {
3130 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3131 
3132 	if (!i915.enable_hangcheck)
3133 		return;
3134 
3135 	/* Don't continually defer the hangcheck so that it is always run at
3136 	 * least once after work has been scheduled on any ring. Otherwise,
3137 	 * we will ignore a hung ring if a second ring is kept busy.
3138 	 */
3139 
3140 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3141 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3142 }
3143 
3144 static void ibx_irq_reset(struct drm_device *dev)
3145 {
3146 	struct drm_i915_private *dev_priv = dev->dev_private;
3147 
3148 	if (HAS_PCH_NOP(dev))
3149 		return;
3150 
3151 	GEN5_IRQ_RESET(SDE);
3152 
3153 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3154 		I915_WRITE(SERR_INT, 0xffffffff);
3155 }
3156 
3157 /*
3158  * SDEIER is also touched by the interrupt handler to work around missed PCH
3159  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3160  * instead we unconditionally enable all PCH interrupt sources here, but then
3161  * only unmask them as needed with SDEIMR.
3162  *
3163  * This function needs to be called before interrupts are enabled.
3164  */
3165 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3166 {
3167 	struct drm_i915_private *dev_priv = dev->dev_private;
3168 
3169 	if (HAS_PCH_NOP(dev))
3170 		return;
3171 
3172 	WARN_ON(I915_READ(SDEIER) != 0);
3173 	I915_WRITE(SDEIER, 0xffffffff);
3174 	POSTING_READ(SDEIER);
3175 }
3176 
3177 static void gen5_gt_irq_reset(struct drm_device *dev)
3178 {
3179 	struct drm_i915_private *dev_priv = dev->dev_private;
3180 
3181 	GEN5_IRQ_RESET(GT);
3182 	if (INTEL_INFO(dev)->gen >= 6)
3183 		GEN5_IRQ_RESET(GEN6_PM);
3184 }
3185 
3186 /* drm_dma.h hooks
3187 */
3188 static void ironlake_irq_reset(struct drm_device *dev)
3189 {
3190 	struct drm_i915_private *dev_priv = dev->dev_private;
3191 
3192 	I915_WRITE(HWSTAM, 0xffffffff);
3193 
3194 	GEN5_IRQ_RESET(DE);
3195 	if (IS_GEN7(dev))
3196 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3197 
3198 	gen5_gt_irq_reset(dev);
3199 
3200 	ibx_irq_reset(dev);
3201 }
3202 
3203 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3204 {
3205 	enum i915_pipe pipe;
3206 
3207 	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3208 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3209 
3210 	for_each_pipe(dev_priv, pipe)
3211 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3212 
3213 	GEN5_IRQ_RESET(VLV_);
3214 }
3215 
3216 static void valleyview_irq_preinstall(struct drm_device *dev)
3217 {
3218 	struct drm_i915_private *dev_priv = dev->dev_private;
3219 
3220 	/* VLV magic */
3221 	I915_WRITE(VLV_IMR, 0);
3222 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3223 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3224 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3225 
3226 	gen5_gt_irq_reset(dev);
3227 
3228 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3229 
3230 	vlv_display_irq_reset(dev_priv);
3231 }
3232 
3233 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3234 {
3235 	GEN8_IRQ_RESET_NDX(GT, 0);
3236 	GEN8_IRQ_RESET_NDX(GT, 1);
3237 	GEN8_IRQ_RESET_NDX(GT, 2);
3238 	GEN8_IRQ_RESET_NDX(GT, 3);
3239 }
3240 
3241 static void gen8_irq_reset(struct drm_device *dev)
3242 {
3243 	struct drm_i915_private *dev_priv = dev->dev_private;
3244 	int pipe;
3245 
3246 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3247 	POSTING_READ(GEN8_MASTER_IRQ);
3248 
3249 	gen8_gt_irq_reset(dev_priv);
3250 
3251 	for_each_pipe(dev_priv, pipe)
3252 		if (intel_display_power_is_enabled(dev_priv,
3253 						   POWER_DOMAIN_PIPE(pipe)))
3254 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3255 
3256 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3257 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3258 	GEN5_IRQ_RESET(GEN8_PCU_);
3259 
3260 	if (HAS_PCH_SPLIT(dev))
3261 		ibx_irq_reset(dev);
3262 }
3263 
3264 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3265 				     unsigned int pipe_mask)
3266 {
3267 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3268 
3269 	spin_lock_irq(&dev_priv->irq_lock);
3270 	if (pipe_mask & 1 << PIPE_A)
3271 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3272 				  dev_priv->de_irq_mask[PIPE_A],
3273 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3274 	if (pipe_mask & 1 << PIPE_B)
3275 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3276 				  dev_priv->de_irq_mask[PIPE_B],
3277 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3278 	if (pipe_mask & 1 << PIPE_C)
3279 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3280 				  dev_priv->de_irq_mask[PIPE_C],
3281 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3282 	spin_unlock_irq(&dev_priv->irq_lock);
3283 }
3284 
3285 static void cherryview_irq_preinstall(struct drm_device *dev)
3286 {
3287 	struct drm_i915_private *dev_priv = dev->dev_private;
3288 
3289 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3290 	POSTING_READ(GEN8_MASTER_IRQ);
3291 
3292 	gen8_gt_irq_reset(dev_priv);
3293 
3294 	GEN5_IRQ_RESET(GEN8_PCU_);
3295 
3296 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3297 
3298 	vlv_display_irq_reset(dev_priv);
3299 }
3300 
3301 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3302 				  const u32 hpd[HPD_NUM_PINS])
3303 {
3304 	struct drm_i915_private *dev_priv = to_i915(dev);
3305 	struct intel_encoder *encoder;
3306 	u32 enabled_irqs = 0;
3307 
3308 	for_each_intel_encoder(dev, encoder)
3309 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3310 			enabled_irqs |= hpd[encoder->hpd_pin];
3311 
3312 	return enabled_irqs;
3313 }
3314 
3315 static void ibx_hpd_irq_setup(struct drm_device *dev)
3316 {
3317 	struct drm_i915_private *dev_priv = dev->dev_private;
3318 	u32 hotplug_irqs, hotplug, enabled_irqs;
3319 
3320 	if (HAS_PCH_IBX(dev)) {
3321 		hotplug_irqs = SDE_HOTPLUG_MASK;
3322 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3323 	} else {
3324 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3325 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3326 	}
3327 
3328 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3329 
3330 	/*
3331 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3332 	 * duration to 2ms (which is the minimum in the Display Port spec).
3333 	 * The pulse duration bits are reserved on LPT+.
3334 	 */
3335 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3336 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3337 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3338 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3339 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3340 	/*
3341 	 * When CPU and PCH are on the same package, port A
3342 	 * HPD must be enabled in both north and south.
3343 	 */
3344 	if (HAS_PCH_LPT_LP(dev))
3345 		hotplug |= PORTA_HOTPLUG_ENABLE;
3346 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3347 }
3348 
3349 static void spt_hpd_irq_setup(struct drm_device *dev)
3350 {
3351 	struct drm_i915_private *dev_priv = dev->dev_private;
3352 	u32 hotplug_irqs, hotplug, enabled_irqs;
3353 
3354 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3355 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3356 
3357 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3358 
3359 	/* Enable digital hotplug on the PCH */
3360 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3361 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3362 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3363 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3364 
3365 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3366 	hotplug |= PORTE_HOTPLUG_ENABLE;
3367 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3368 }
3369 
3370 static void ilk_hpd_irq_setup(struct drm_device *dev)
3371 {
3372 	struct drm_i915_private *dev_priv = dev->dev_private;
3373 	u32 hotplug_irqs, hotplug, enabled_irqs;
3374 
3375 	if (INTEL_INFO(dev)->gen >= 8) {
3376 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3377 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3378 
3379 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3380 	} else if (INTEL_INFO(dev)->gen >= 7) {
3381 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3382 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3383 
3384 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3385 	} else {
3386 		hotplug_irqs = DE_DP_A_HOTPLUG;
3387 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3388 
3389 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3390 	}
3391 
3392 	/*
3393 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3394 	 * duration to 2ms (which is the minimum in the Display Port spec)
3395 	 * The pulse duration bits are reserved on HSW+.
3396 	 */
3397 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3398 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3399 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3400 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3401 
3402 	ibx_hpd_irq_setup(dev);
3403 }
3404 
3405 static void bxt_hpd_irq_setup(struct drm_device *dev)
3406 {
3407 	struct drm_i915_private *dev_priv = dev->dev_private;
3408 	u32 hotplug_irqs, hotplug, enabled_irqs;
3409 
3410 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3411 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3412 
3413 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3414 
3415 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3416 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3417 		PORTA_HOTPLUG_ENABLE;
3418 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3419 }
3420 
3421 static void ibx_irq_postinstall(struct drm_device *dev)
3422 {
3423 	struct drm_i915_private *dev_priv = dev->dev_private;
3424 	u32 mask;
3425 
3426 	if (HAS_PCH_NOP(dev))
3427 		return;
3428 
3429 	if (HAS_PCH_IBX(dev))
3430 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3431 	else
3432 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3433 
3434 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3435 	I915_WRITE(SDEIMR, ~mask);
3436 }
3437 
3438 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3439 {
3440 	struct drm_i915_private *dev_priv = dev->dev_private;
3441 	u32 pm_irqs, gt_irqs;
3442 
3443 	pm_irqs = gt_irqs = 0;
3444 
3445 	dev_priv->gt_irq_mask = ~0;
3446 	if (HAS_L3_DPF(dev)) {
3447 		/* L3 parity interrupt is always unmasked. */
3448 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3449 		gt_irqs |= GT_PARITY_ERROR(dev);
3450 	}
3451 
3452 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3453 	if (IS_GEN5(dev)) {
3454 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3455 			   ILK_BSD_USER_INTERRUPT;
3456 	} else {
3457 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3458 	}
3459 
3460 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3461 
3462 	if (INTEL_INFO(dev)->gen >= 6) {
3463 		/*
3464 		 * RPS interrupts will get enabled/disabled on demand when RPS
3465 		 * itself is enabled/disabled.
3466 		 */
3467 		if (HAS_VEBOX(dev))
3468 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3469 
3470 		dev_priv->pm_irq_mask = 0xffffffff;
3471 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3472 	}
3473 }
3474 
3475 static int ironlake_irq_postinstall(struct drm_device *dev)
3476 {
3477 	struct drm_i915_private *dev_priv = dev->dev_private;
3478 	u32 display_mask, extra_mask;
3479 
3480 	if (INTEL_INFO(dev)->gen >= 7) {
3481 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3482 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3483 				DE_PLANEB_FLIP_DONE_IVB |
3484 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3485 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3486 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3487 			      DE_DP_A_HOTPLUG_IVB);
3488 	} else {
3489 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3490 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3491 				DE_AUX_CHANNEL_A |
3492 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3493 				DE_POISON);
3494 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3495 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3496 			      DE_DP_A_HOTPLUG);
3497 	}
3498 
3499 	dev_priv->irq_mask = ~display_mask;
3500 
3501 	I915_WRITE(HWSTAM, 0xeffe);
3502 
3503 	ibx_irq_pre_postinstall(dev);
3504 
3505 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3506 
3507 	gen5_gt_irq_postinstall(dev);
3508 
3509 	ibx_irq_postinstall(dev);
3510 
3511 	if (IS_IRONLAKE_M(dev)) {
3512 		/* Enable PCU event interrupts
3513 		 *
3514 		 * spinlocking not required here for correctness since interrupt
3515 		 * setup is guaranteed to run in single-threaded context. But we
3516 		 * need it to make the assert_spin_locked happy. */
3517 		spin_lock_irq(&dev_priv->irq_lock);
3518 		ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3519 		spin_unlock_irq(&dev_priv->irq_lock);
3520 	}
3521 
3522 	return 0;
3523 }
3524 
3525 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3526 {
3527 	u32 pipestat_mask;
3528 	u32 iir_mask;
3529 	enum i915_pipe pipe;
3530 
3531 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3532 			PIPE_FIFO_UNDERRUN_STATUS;
3533 
3534 	for_each_pipe(dev_priv, pipe)
3535 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3536 	POSTING_READ(PIPESTAT(PIPE_A));
3537 
3538 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3539 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3540 
3541 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3542 	for_each_pipe(dev_priv, pipe)
3543 		      i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3544 
3545 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3546 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3547 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3548 	if (IS_CHERRYVIEW(dev_priv))
3549 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3550 	dev_priv->irq_mask &= ~iir_mask;
3551 
3552 	I915_WRITE(VLV_IIR, iir_mask);
3553 	I915_WRITE(VLV_IIR, iir_mask);
3554 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3555 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3556 	POSTING_READ(VLV_IMR);
3557 }
3558 
3559 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3560 {
3561 	u32 pipestat_mask;
3562 	u32 iir_mask;
3563 	enum i915_pipe pipe;
3564 
3565 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3566 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3567 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3568 	if (IS_CHERRYVIEW(dev_priv))
3569 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3570 
3571 	dev_priv->irq_mask |= iir_mask;
3572 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3573 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3574 	I915_WRITE(VLV_IIR, iir_mask);
3575 	I915_WRITE(VLV_IIR, iir_mask);
3576 	POSTING_READ(VLV_IIR);
3577 
3578 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3579 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3580 
3581 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3582 	for_each_pipe(dev_priv, pipe)
3583 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3584 
3585 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3586 			PIPE_FIFO_UNDERRUN_STATUS;
3587 
3588 	for_each_pipe(dev_priv, pipe)
3589 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3590 	POSTING_READ(PIPESTAT(PIPE_A));
3591 }
3592 
3593 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3594 {
3595 	assert_spin_locked(&dev_priv->irq_lock);
3596 
3597 	if (dev_priv->display_irqs_enabled)
3598 		return;
3599 
3600 	dev_priv->display_irqs_enabled = true;
3601 
3602 	if (intel_irqs_enabled(dev_priv))
3603 		valleyview_display_irqs_install(dev_priv);
3604 }
3605 
3606 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3607 {
3608 	assert_spin_locked(&dev_priv->irq_lock);
3609 
3610 	if (!dev_priv->display_irqs_enabled)
3611 		return;
3612 
3613 	dev_priv->display_irqs_enabled = false;
3614 
3615 	if (intel_irqs_enabled(dev_priv))
3616 		valleyview_display_irqs_uninstall(dev_priv);
3617 }
3618 
3619 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3620 {
3621 	dev_priv->irq_mask = ~0;
3622 
3623 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3624 	POSTING_READ(PORT_HOTPLUG_EN);
3625 
3626 	I915_WRITE(VLV_IIR, 0xffffffff);
3627 	I915_WRITE(VLV_IIR, 0xffffffff);
3628 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3629 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3630 	POSTING_READ(VLV_IMR);
3631 
3632 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3633 	 * just to make the assert_spin_locked check happy. */
3634 	spin_lock_irq(&dev_priv->irq_lock);
3635 	if (dev_priv->display_irqs_enabled)
3636 		valleyview_display_irqs_install(dev_priv);
3637 	spin_unlock_irq(&dev_priv->irq_lock);
3638 }
3639 
3640 static int valleyview_irq_postinstall(struct drm_device *dev)
3641 {
3642 	struct drm_i915_private *dev_priv = dev->dev_private;
3643 
3644 	vlv_display_irq_postinstall(dev_priv);
3645 
3646 	gen5_gt_irq_postinstall(dev);
3647 
3648 	/* ack & enable invalid PTE error interrupts */
3649 #if 0 /* FIXME: add support to irq handler for checking these bits */
3650 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3651 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3652 #endif
3653 
3654 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3655 
3656 	return 0;
3657 }
3658 
3659 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3660 {
3661 	/* These are interrupts we'll toggle with the ring mask register */
3662 	uint32_t gt_interrupts[] = {
3663 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3664 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3665 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3666 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3667 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3668 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3669 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3670 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3671 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3672 		0,
3673 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3674 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3675 		};
3676 
3677 	dev_priv->pm_irq_mask = 0xffffffff;
3678 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3679 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3680 	/*
3681 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3682 	 * is enabled/disabled.
3683 	 */
3684 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3685 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3686 }
3687 
3688 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3689 {
3690 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3691 	uint32_t de_pipe_enables;
3692 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3693 	u32 de_port_enables;
3694 	enum i915_pipe pipe;
3695 
3696 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3697 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3698 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3699 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3700 				  GEN9_AUX_CHANNEL_D;
3701 		if (IS_BROXTON(dev_priv))
3702 			de_port_masked |= BXT_DE_PORT_GMBUS;
3703 	} else {
3704 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3705 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3706 	}
3707 
3708 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3709 					   GEN8_PIPE_FIFO_UNDERRUN;
3710 
3711 	de_port_enables = de_port_masked;
3712 	if (IS_BROXTON(dev_priv))
3713 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3714 	else if (IS_BROADWELL(dev_priv))
3715 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3716 
3717 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3718 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3719 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3720 
3721 	for_each_pipe(dev_priv, pipe)
3722 		if (intel_display_power_is_enabled(dev_priv,
3723 				POWER_DOMAIN_PIPE(pipe)))
3724 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3725 					  dev_priv->de_irq_mask[pipe],
3726 					  de_pipe_enables);
3727 
3728 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3729 }
3730 
3731 static int gen8_irq_postinstall(struct drm_device *dev)
3732 {
3733 	struct drm_i915_private *dev_priv = dev->dev_private;
3734 
3735 	if (HAS_PCH_SPLIT(dev))
3736 		ibx_irq_pre_postinstall(dev);
3737 
3738 	gen8_gt_irq_postinstall(dev_priv);
3739 	gen8_de_irq_postinstall(dev_priv);
3740 
3741 	if (HAS_PCH_SPLIT(dev))
3742 		ibx_irq_postinstall(dev);
3743 
3744 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3745 	POSTING_READ(GEN8_MASTER_IRQ);
3746 
3747 	return 0;
3748 }
3749 
3750 static int cherryview_irq_postinstall(struct drm_device *dev)
3751 {
3752 	struct drm_i915_private *dev_priv = dev->dev_private;
3753 
3754 	vlv_display_irq_postinstall(dev_priv);
3755 
3756 	gen8_gt_irq_postinstall(dev_priv);
3757 
3758 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3759 	POSTING_READ(GEN8_MASTER_IRQ);
3760 
3761 	return 0;
3762 }
3763 
3764 static void gen8_irq_uninstall(struct drm_device *dev)
3765 {
3766 	struct drm_i915_private *dev_priv = dev->dev_private;
3767 
3768 	if (!dev_priv)
3769 		return;
3770 
3771 	gen8_irq_reset(dev);
3772 }
3773 
3774 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3775 {
3776 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3777 	 * just to make the assert_spin_locked check happy. */
3778 	spin_lock_irq(&dev_priv->irq_lock);
3779 	if (dev_priv->display_irqs_enabled)
3780 		valleyview_display_irqs_uninstall(dev_priv);
3781 	spin_unlock_irq(&dev_priv->irq_lock);
3782 
3783 	vlv_display_irq_reset(dev_priv);
3784 
3785 	dev_priv->irq_mask = ~0;
3786 }
3787 
3788 static void valleyview_irq_uninstall(struct drm_device *dev)
3789 {
3790 	struct drm_i915_private *dev_priv = dev->dev_private;
3791 
3792 	if (!dev_priv)
3793 		return;
3794 
3795 	I915_WRITE(VLV_MASTER_IER, 0);
3796 
3797 	gen5_gt_irq_reset(dev);
3798 
3799 	I915_WRITE(HWSTAM, 0xffffffff);
3800 
3801 	vlv_display_irq_uninstall(dev_priv);
3802 }
3803 
3804 static void cherryview_irq_uninstall(struct drm_device *dev)
3805 {
3806 	struct drm_i915_private *dev_priv = dev->dev_private;
3807 
3808 	if (!dev_priv)
3809 		return;
3810 
3811 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3812 	POSTING_READ(GEN8_MASTER_IRQ);
3813 
3814 	gen8_gt_irq_reset(dev_priv);
3815 
3816 	GEN5_IRQ_RESET(GEN8_PCU_);
3817 
3818 	vlv_display_irq_uninstall(dev_priv);
3819 }
3820 
3821 static void ironlake_irq_uninstall(struct drm_device *dev)
3822 {
3823 	struct drm_i915_private *dev_priv = dev->dev_private;
3824 
3825 	if (!dev_priv)
3826 		return;
3827 
3828 	ironlake_irq_reset(dev);
3829 }
3830 
3831 static void i8xx_irq_preinstall(struct drm_device * dev)
3832 {
3833 	struct drm_i915_private *dev_priv = dev->dev_private;
3834 	int pipe;
3835 
3836 	for_each_pipe(dev_priv, pipe)
3837 		I915_WRITE(PIPESTAT(pipe), 0);
3838 	I915_WRITE16(IMR, 0xffff);
3839 	I915_WRITE16(IER, 0x0);
3840 	POSTING_READ16(IER);
3841 }
3842 
3843 static int i8xx_irq_postinstall(struct drm_device *dev)
3844 {
3845 	struct drm_i915_private *dev_priv = dev->dev_private;
3846 
3847 	I915_WRITE16(EMR,
3848 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3849 
3850 	/* Unmask the interrupts that we always want on. */
3851 	dev_priv->irq_mask =
3852 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3853 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3854 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3855 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3856 	I915_WRITE16(IMR, dev_priv->irq_mask);
3857 
3858 	I915_WRITE16(IER,
3859 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3860 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3861 		     I915_USER_INTERRUPT);
3862 	POSTING_READ16(IER);
3863 
3864 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3865 	 * just to make the assert_spin_locked check happy. */
3866 	spin_lock_irq(&dev_priv->irq_lock);
3867 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3868 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3869 	spin_unlock_irq(&dev_priv->irq_lock);
3870 
3871 	return 0;
3872 }
3873 
3874 /*
3875  * Returns true when a page flip has completed.
3876  */
3877 static bool i8xx_handle_vblank(struct drm_device *dev,
3878 			       int plane, int pipe, u32 iir)
3879 {
3880 	struct drm_i915_private *dev_priv = dev->dev_private;
3881 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3882 
3883 	if (!intel_pipe_handle_vblank(dev, pipe))
3884 		return false;
3885 
3886 	if ((iir & flip_pending) == 0)
3887 		goto check_page_flip;
3888 
3889 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3890 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3891 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3892 	 * the flip is completed (no longer pending). Since this doesn't raise
3893 	 * an interrupt per se, we watch for the change at vblank.
3894 	 */
3895 	if (I915_READ16(ISR) & flip_pending)
3896 		goto check_page_flip;
3897 
3898 	intel_prepare_page_flip(dev, plane);
3899 	intel_finish_page_flip(dev, pipe);
3900 	return true;
3901 
3902 check_page_flip:
3903 	intel_check_page_flip(dev, pipe);
3904 	return false;
3905 }
3906 
3907 static irqreturn_t i8xx_irq_handler(void *arg)
3908 {
3909 	struct drm_device *dev = arg;
3910 	struct drm_i915_private *dev_priv = dev->dev_private;
3911 	u16 iir, new_iir;
3912 	u32 pipe_stats[2];
3913 	int pipe;
3914 	u16 flip_mask =
3915 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3916 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3917 
3918 	if (!intel_irqs_enabled(dev_priv))
3919 		return IRQ_NONE;
3920 
3921 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
3922 	disable_rpm_wakeref_asserts(dev_priv);
3923 
3924 	iir = I915_READ16(IIR);
3925 	if (iir == 0)
3926 		goto out;
3927 
3928 	while (iir & ~flip_mask) {
3929 		/* Can't rely on pipestat interrupt bit in iir as it might
3930 		 * have been cleared after the pipestat interrupt was received.
3931 		 * It doesn't set the bit in iir again, but it still produces
3932 		 * interrupts (for non-MSI).
3933 		 */
3934 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3935 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3936 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3937 
3938 		for_each_pipe(dev_priv, pipe) {
3939 			i915_reg_t reg = PIPESTAT(pipe);
3940 			pipe_stats[pipe] = I915_READ(reg);
3941 
3942 			/*
3943 			 * Clear the PIPE*STAT regs before the IIR
3944 			 */
3945 			if (pipe_stats[pipe] & 0x8000ffff)
3946 				I915_WRITE(reg, pipe_stats[pipe]);
3947 		}
3948 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3949 
3950 		I915_WRITE16(IIR, iir & ~flip_mask);
3951 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3952 
3953 		if (iir & I915_USER_INTERRUPT)
3954 			notify_ring(&dev_priv->ring[RCS]);
3955 
3956 		for_each_pipe(dev_priv, pipe) {
3957 			int plane = pipe;
3958 			if (HAS_FBC(dev))
3959 				plane = !plane;
3960 
3961 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3962 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3963 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3964 
3965 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3966 				i9xx_pipe_crc_irq_handler(dev, pipe);
3967 
3968 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3969 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3970 								    pipe);
3971 		}
3972 
3973 		iir = new_iir;
3974 	}
3975 
3976 out:
3977 	enable_rpm_wakeref_asserts(dev_priv);
3978 
3979 }
3980 
3981 static void i8xx_irq_uninstall(struct drm_device * dev)
3982 {
3983 	struct drm_i915_private *dev_priv = dev->dev_private;
3984 	int pipe;
3985 
3986 	for_each_pipe(dev_priv, pipe) {
3987 		/* Clear enable bits; then clear status bits */
3988 		I915_WRITE(PIPESTAT(pipe), 0);
3989 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3990 	}
3991 	I915_WRITE16(IMR, 0xffff);
3992 	I915_WRITE16(IER, 0x0);
3993 	I915_WRITE16(IIR, I915_READ16(IIR));
3994 }
3995 
3996 static void i915_irq_preinstall(struct drm_device * dev)
3997 {
3998 	struct drm_i915_private *dev_priv = dev->dev_private;
3999 	int pipe;
4000 
4001 	if (I915_HAS_HOTPLUG(dev)) {
4002 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4003 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4004 	}
4005 
4006 	I915_WRITE16(HWSTAM, 0xeffe);
4007 	for_each_pipe(dev_priv, pipe)
4008 		I915_WRITE(PIPESTAT(pipe), 0);
4009 	I915_WRITE(IMR, 0xffffffff);
4010 	I915_WRITE(IER, 0x0);
4011 	POSTING_READ(IER);
4012 }
4013 
4014 static int i915_irq_postinstall(struct drm_device *dev)
4015 {
4016 	struct drm_i915_private *dev_priv = dev->dev_private;
4017 	u32 enable_mask;
4018 
4019 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4020 
4021 	/* Unmask the interrupts that we always want on. */
4022 	dev_priv->irq_mask =
4023 		~(I915_ASLE_INTERRUPT |
4024 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4025 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4026 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4027 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4028 
4029 	enable_mask =
4030 		I915_ASLE_INTERRUPT |
4031 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4032 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4033 		I915_USER_INTERRUPT;
4034 
4035 	if (I915_HAS_HOTPLUG(dev)) {
4036 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4037 		POSTING_READ(PORT_HOTPLUG_EN);
4038 
4039 		/* Enable in IER... */
4040 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4041 		/* and unmask in IMR */
4042 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4043 	}
4044 
4045 	I915_WRITE(IMR, dev_priv->irq_mask);
4046 	I915_WRITE(IER, enable_mask);
4047 	POSTING_READ(IER);
4048 
4049 	i915_enable_asle_pipestat(dev);
4050 
4051 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4052 	 * just to make the assert_spin_locked check happy. */
4053 	spin_lock_irq(&dev_priv->irq_lock);
4054 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4055 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4056 	spin_unlock_irq(&dev_priv->irq_lock);
4057 
4058 	return 0;
4059 }
4060 
4061 /*
4062  * Returns true when a page flip has completed.
4063  */
4064 static bool i915_handle_vblank(struct drm_device *dev,
4065 			       int plane, int pipe, u32 iir)
4066 {
4067 	struct drm_i915_private *dev_priv = dev->dev_private;
4068 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4069 
4070 	if (!intel_pipe_handle_vblank(dev, pipe))
4071 		return false;
4072 
4073 	if ((iir & flip_pending) == 0)
4074 		goto check_page_flip;
4075 
4076 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
4077 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4078 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4079 	 * the flip is completed (no longer pending). Since this doesn't raise
4080 	 * an interrupt per se, we watch for the change at vblank.
4081 	 */
4082 	if (I915_READ(ISR) & flip_pending)
4083 		goto check_page_flip;
4084 
4085 	intel_prepare_page_flip(dev, plane);
4086 	intel_finish_page_flip(dev, pipe);
4087 	return true;
4088 
4089 check_page_flip:
4090 	intel_check_page_flip(dev, pipe);
4091 	return false;
4092 }
4093 
4094 static irqreturn_t i915_irq_handler(void *arg)
4095 {
4096 	struct drm_device *dev = arg;
4097 	struct drm_i915_private *dev_priv = dev->dev_private;
4098 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4099 	u32 flip_mask =
4100 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4101 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4102 	int pipe;
4103 
4104 	if (!intel_irqs_enabled(dev_priv))
4105 		return IRQ_NONE;
4106 
4107 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4108 	disable_rpm_wakeref_asserts(dev_priv);
4109 
4110 	iir = I915_READ(IIR);
4111 	do {
4112 		bool irq_received = (iir & ~flip_mask) != 0;
4113 		bool blc_event = false;
4114 
4115 		/* Can't rely on pipestat interrupt bit in iir as it might
4116 		 * have been cleared after the pipestat interrupt was received.
4117 		 * It doesn't set the bit in iir again, but it still produces
4118 		 * interrupts (for non-MSI).
4119 		 */
4120 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4121 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4122 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4123 
4124 		for_each_pipe(dev_priv, pipe) {
4125 			i915_reg_t reg = PIPESTAT(pipe);
4126 			pipe_stats[pipe] = I915_READ(reg);
4127 
4128 			/* Clear the PIPE*STAT regs before the IIR */
4129 			if (pipe_stats[pipe] & 0x8000ffff) {
4130 				I915_WRITE(reg, pipe_stats[pipe]);
4131 				irq_received = true;
4132 			}
4133 		}
4134 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4135 
4136 		if (!irq_received)
4137 			break;
4138 
4139 		/* Consume port.  Then clear IIR or we'll miss events */
4140 		if (I915_HAS_HOTPLUG(dev) &&
4141 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4142 			i9xx_hpd_irq_handler(dev);
4143 
4144 		I915_WRITE(IIR, iir & ~flip_mask);
4145 		new_iir = I915_READ(IIR); /* Flush posted writes */
4146 
4147 		if (iir & I915_USER_INTERRUPT)
4148 			notify_ring(&dev_priv->ring[RCS]);
4149 
4150 		for_each_pipe(dev_priv, pipe) {
4151 			int plane = pipe;
4152 			if (HAS_FBC(dev))
4153 				plane = !plane;
4154 
4155 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4156 			    i915_handle_vblank(dev, plane, pipe, iir))
4157 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4158 
4159 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4160 				blc_event = true;
4161 
4162 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4163 				i9xx_pipe_crc_irq_handler(dev, pipe);
4164 
4165 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4166 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4167 								    pipe);
4168 		}
4169 
4170 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4171 			intel_opregion_asle_intr(dev);
4172 
4173 		/* With MSI, interrupts are only generated when iir
4174 		 * transitions from zero to nonzero.  If another bit got
4175 		 * set while we were handling the existing iir bits, then
4176 		 * we would never get another interrupt.
4177 		 *
4178 		 * This is fine on non-MSI as well, as if we hit this path
4179 		 * we avoid exiting the interrupt handler only to generate
4180 		 * another one.
4181 		 *
4182 		 * Note that for MSI this could cause a stray interrupt report
4183 		 * if an interrupt landed in the time between writing IIR and
4184 		 * the posting read.  This should be rare enough to never
4185 		 * trigger the 99% of 100,000 interrupts test for disabling
4186 		 * stray interrupts.
4187 		 */
4188 		iir = new_iir;
4189 	} while (iir & ~flip_mask);
4190 
4191 	enable_rpm_wakeref_asserts(dev_priv);
4192 
4193 }
4194 
4195 static void i915_irq_uninstall(struct drm_device * dev)
4196 {
4197 	struct drm_i915_private *dev_priv = dev->dev_private;
4198 	int pipe;
4199 
4200 	if (I915_HAS_HOTPLUG(dev)) {
4201 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4202 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4203 	}
4204 
4205 	I915_WRITE16(HWSTAM, 0xffff);
4206 	for_each_pipe(dev_priv, pipe) {
4207 		/* Clear enable bits; then clear status bits */
4208 		I915_WRITE(PIPESTAT(pipe), 0);
4209 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4210 	}
4211 	I915_WRITE(IMR, 0xffffffff);
4212 	I915_WRITE(IER, 0x0);
4213 
4214 	I915_WRITE(IIR, I915_READ(IIR));
4215 }
4216 
4217 static void i965_irq_preinstall(struct drm_device * dev)
4218 {
4219 	struct drm_i915_private *dev_priv = dev->dev_private;
4220 	int pipe;
4221 
4222 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4223 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4224 
4225 	I915_WRITE(HWSTAM, 0xeffe);
4226 	for_each_pipe(dev_priv, pipe)
4227 		I915_WRITE(PIPESTAT(pipe), 0);
4228 	I915_WRITE(IMR, 0xffffffff);
4229 	I915_WRITE(IER, 0x0);
4230 	POSTING_READ(IER);
4231 }
4232 
4233 static int i965_irq_postinstall(struct drm_device *dev)
4234 {
4235 	struct drm_i915_private *dev_priv = dev->dev_private;
4236 	u32 enable_mask;
4237 	u32 error_mask;
4238 
4239 	/* Unmask the interrupts that we always want on. */
4240 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4241 			       I915_DISPLAY_PORT_INTERRUPT |
4242 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4243 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4244 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4245 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4246 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4247 
4248 	enable_mask = ~dev_priv->irq_mask;
4249 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4250 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4251 	enable_mask |= I915_USER_INTERRUPT;
4252 
4253 	if (IS_G4X(dev))
4254 		enable_mask |= I915_BSD_USER_INTERRUPT;
4255 
4256 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4257 	 * just to make the assert_spin_locked check happy. */
4258 	spin_lock_irq(&dev_priv->irq_lock);
4259 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4260 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4261 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4262 	spin_unlock_irq(&dev_priv->irq_lock);
4263 
4264 	/*
4265 	 * Enable some error detection, note the instruction error mask
4266 	 * bit is reserved, so we leave it masked.
4267 	 */
4268 	if (IS_G4X(dev)) {
4269 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4270 			       GM45_ERROR_MEM_PRIV |
4271 			       GM45_ERROR_CP_PRIV |
4272 			       I915_ERROR_MEMORY_REFRESH);
4273 	} else {
4274 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4275 			       I915_ERROR_MEMORY_REFRESH);
4276 	}
4277 	I915_WRITE(EMR, error_mask);
4278 
4279 	I915_WRITE(IMR, dev_priv->irq_mask);
4280 	I915_WRITE(IER, enable_mask);
4281 	POSTING_READ(IER);
4282 
4283 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4284 	POSTING_READ(PORT_HOTPLUG_EN);
4285 
4286 	i915_enable_asle_pipestat(dev);
4287 
4288 	return 0;
4289 }
4290 
4291 static void i915_hpd_irq_setup(struct drm_device *dev)
4292 {
4293 	struct drm_i915_private *dev_priv = dev->dev_private;
4294 	u32 hotplug_en;
4295 
4296 	assert_spin_locked(&dev_priv->irq_lock);
4297 
4298 	/* Note HDMI and DP share hotplug bits */
4299 	/* enable bits are the same for all generations */
4300 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4301 	/* Programming the CRT detection parameters tends
4302 	   to generate a spurious hotplug event about three
4303 	   seconds later.  So just do it once.
4304 	*/
4305 	if (IS_G4X(dev))
4306 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4307 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4308 
4309 	/* Ignore TV since it's buggy */
4310 	i915_hotplug_interrupt_update_locked(dev_priv,
4311 					     HOTPLUG_INT_EN_MASK |
4312 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4313 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4314 					     hotplug_en);
4315 }
4316 
4317 static irqreturn_t i965_irq_handler(void *arg)
4318 {
4319 	struct drm_device *dev = arg;
4320 	struct drm_i915_private *dev_priv = dev->dev_private;
4321 	u32 iir, new_iir;
4322 	u32 pipe_stats[I915_MAX_PIPES];
4323 	int pipe;
4324 	u32 flip_mask =
4325 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4326 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4327 
4328 	if (!intel_irqs_enabled(dev_priv))
4329 		return IRQ_NONE;
4330 
4331 	/* IRQs are synced during runtime_suspend, we don't require a wakeref */
4332 	disable_rpm_wakeref_asserts(dev_priv);
4333 
4334 	iir = I915_READ(IIR);
4335 
4336 	for (;;) {
4337 		bool irq_received = (iir & ~flip_mask) != 0;
4338 		bool blc_event = false;
4339 
4340 		/* Can't rely on pipestat interrupt bit in iir as it might
4341 		 * have been cleared after the pipestat interrupt was received.
4342 		 * It doesn't set the bit in iir again, but it still produces
4343 		 * interrupts (for non-MSI).
4344 		 */
4345 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4346 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4347 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4348 
4349 		for_each_pipe(dev_priv, pipe) {
4350 			i915_reg_t reg = PIPESTAT(pipe);
4351 			pipe_stats[pipe] = I915_READ(reg);
4352 
4353 			/*
4354 			 * Clear the PIPE*STAT regs before the IIR
4355 			 */
4356 			if (pipe_stats[pipe] & 0x8000ffff) {
4357 				I915_WRITE(reg, pipe_stats[pipe]);
4358 				irq_received = true;
4359 			}
4360 		}
4361 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4362 
4363 		if (!irq_received)
4364 			break;
4365 
4366 		/* Consume port.  Then clear IIR or we'll miss events */
4367 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4368 			i9xx_hpd_irq_handler(dev);
4369 
4370 		I915_WRITE(IIR, iir & ~flip_mask);
4371 		new_iir = I915_READ(IIR); /* Flush posted writes */
4372 
4373 		if (iir & I915_USER_INTERRUPT)
4374 			notify_ring(&dev_priv->ring[RCS]);
4375 		if (iir & I915_BSD_USER_INTERRUPT)
4376 			notify_ring(&dev_priv->ring[VCS]);
4377 
4378 		for_each_pipe(dev_priv, pipe) {
4379 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4380 			    i915_handle_vblank(dev, pipe, pipe, iir))
4381 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4382 
4383 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4384 				blc_event = true;
4385 
4386 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4387 				i9xx_pipe_crc_irq_handler(dev, pipe);
4388 
4389 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4390 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4391 		}
4392 
4393 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4394 			intel_opregion_asle_intr(dev);
4395 
4396 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4397 			gmbus_irq_handler(dev);
4398 
4399 		/* With MSI, interrupts are only generated when iir
4400 		 * transitions from zero to nonzero.  If another bit got
4401 		 * set while we were handling the existing iir bits, then
4402 		 * we would never get another interrupt.
4403 		 *
4404 		 * This is fine on non-MSI as well, as if we hit this path
4405 		 * we avoid exiting the interrupt handler only to generate
4406 		 * another one.
4407 		 *
4408 		 * Note that for MSI this could cause a stray interrupt report
4409 		 * if an interrupt landed in the time between writing IIR and
4410 		 * the posting read.  This should be rare enough to never
4411 		 * trigger the 99% of 100,000 interrupts test for disabling
4412 		 * stray interrupts.
4413 		 */
4414 		iir = new_iir;
4415 	}
4416 
4417 	enable_rpm_wakeref_asserts(dev_priv);
4418 
4419 }
4420 
4421 static void i965_irq_uninstall(struct drm_device * dev)
4422 {
4423 	struct drm_i915_private *dev_priv = dev->dev_private;
4424 	int pipe;
4425 
4426 	if (!dev_priv)
4427 		return;
4428 
4429 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4430 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4431 
4432 	I915_WRITE(HWSTAM, 0xffffffff);
4433 	for_each_pipe(dev_priv, pipe)
4434 		I915_WRITE(PIPESTAT(pipe), 0);
4435 	I915_WRITE(IMR, 0xffffffff);
4436 	I915_WRITE(IER, 0x0);
4437 
4438 	for_each_pipe(dev_priv, pipe)
4439 		I915_WRITE(PIPESTAT(pipe),
4440 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4441 	I915_WRITE(IIR, I915_READ(IIR));
4442 }
4443 
4444 /**
4445  * intel_irq_init - initializes irq support
4446  * @dev_priv: i915 device instance
4447  *
4448  * This function initializes all the irq support including work items, timers
4449  * and all the vtables. It does not setup the interrupt itself though.
4450  */
4451 void intel_irq_init(struct drm_i915_private *dev_priv)
4452 {
4453 	struct drm_device *dev = dev_priv->dev;
4454 
4455 	intel_hpd_init_work(dev_priv);
4456 
4457 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4458 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4459 
4460 	/* Let's track the enabled rps events */
4461 	if (IS_VALLEYVIEW(dev_priv))
4462 		/* WaGsvRC0ResidencyMethod:vlv */
4463 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4464 	else
4465 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4466 
4467 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4468 			  i915_hangcheck_elapsed);
4469 
4470 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4471 
4472 	if (IS_GEN2(dev_priv)) {
4473 		dev->max_vblank_count = 0;
4474 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4475 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4476 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4477 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4478 	} else {
4479 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4480 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4481 	}
4482 
4483 	/*
4484 	 * Opt out of the vblank disable timer on everything except gen2.
4485 	 * Gen2 doesn't have a hardware frame counter and so depends on
4486 	 * vblank interrupts to produce sane vblank seuquence numbers.
4487 	 */
4488 	if (!IS_GEN2(dev_priv))
4489 		dev->vblank_disable_immediate = true;
4490 
4491 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4492 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4493 
4494 	if (IS_CHERRYVIEW(dev_priv)) {
4495 		dev->driver->irq_handler = cherryview_irq_handler;
4496 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4497 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4498 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4499 		dev->driver->enable_vblank = valleyview_enable_vblank;
4500 		dev->driver->disable_vblank = valleyview_disable_vblank;
4501 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4502 	} else if (IS_VALLEYVIEW(dev_priv)) {
4503 		dev->driver->irq_handler = valleyview_irq_handler;
4504 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4505 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4506 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4507 		dev->driver->enable_vblank = valleyview_enable_vblank;
4508 		dev->driver->disable_vblank = valleyview_disable_vblank;
4509 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4510 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4511 		dev->driver->irq_handler = gen8_irq_handler;
4512 		dev->driver->irq_preinstall = gen8_irq_reset;
4513 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4514 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4515 		dev->driver->enable_vblank = gen8_enable_vblank;
4516 		dev->driver->disable_vblank = gen8_disable_vblank;
4517 		if (IS_BROXTON(dev))
4518 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4519 		else if (HAS_PCH_SPT(dev))
4520 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4521 		else
4522 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4523 	} else if (HAS_PCH_SPLIT(dev)) {
4524 		dev->driver->irq_handler = ironlake_irq_handler;
4525 		dev->driver->irq_preinstall = ironlake_irq_reset;
4526 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4527 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4528 		dev->driver->enable_vblank = ironlake_enable_vblank;
4529 		dev->driver->disable_vblank = ironlake_disable_vblank;
4530 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4531 	} else {
4532 		if (INTEL_INFO(dev_priv)->gen == 2) {
4533 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4534 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4535 			dev->driver->irq_handler = i8xx_irq_handler;
4536 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4537 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4538 			dev->driver->irq_preinstall = i915_irq_preinstall;
4539 			dev->driver->irq_postinstall = i915_irq_postinstall;
4540 			dev->driver->irq_uninstall = i915_irq_uninstall;
4541 			dev->driver->irq_handler = i915_irq_handler;
4542 		} else {
4543 			dev->driver->irq_preinstall = i965_irq_preinstall;
4544 			dev->driver->irq_postinstall = i965_irq_postinstall;
4545 			dev->driver->irq_uninstall = i965_irq_uninstall;
4546 			dev->driver->irq_handler = i965_irq_handler;
4547 		}
4548 		if (I915_HAS_HOTPLUG(dev_priv))
4549 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4550 		dev->driver->enable_vblank = i915_enable_vblank;
4551 		dev->driver->disable_vblank = i915_disable_vblank;
4552 	}
4553 }
4554 
4555 /**
4556  * intel_irq_install - enables the hardware interrupt
4557  * @dev_priv: i915 device instance
4558  *
4559  * This function enables the hardware interrupt handling, but leaves the hotplug
4560  * handling still disabled. It is called after intel_irq_init().
4561  *
4562  * In the driver load and resume code we need working interrupts in a few places
4563  * but don't want to deal with the hassle of concurrent probe and hotplug
4564  * workers. Hence the split into this two-stage approach.
4565  */
4566 int intel_irq_install(struct drm_i915_private *dev_priv)
4567 {
4568 	/*
4569 	 * We enable some interrupt sources in our postinstall hooks, so mark
4570 	 * interrupts as enabled _before_ actually enabling them to avoid
4571 	 * special cases in our ordering checks.
4572 	 */
4573 	dev_priv->pm.irqs_enabled = true;
4574 
4575 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4576 }
4577 
4578 /**
4579  * intel_irq_uninstall - finilizes all irq handling
4580  * @dev_priv: i915 device instance
4581  *
4582  * This stops interrupt and hotplug handling and unregisters and frees all
4583  * resources acquired in the init functions.
4584  */
4585 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4586 {
4587 	drm_irq_uninstall(dev_priv->dev);
4588 	intel_hpd_cancel_work(dev_priv);
4589 	dev_priv->pm.irqs_enabled = false;
4590 }
4591 
4592 /**
4593  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4594  * @dev_priv: i915 device instance
4595  *
4596  * This function is used to disable interrupts at runtime, both in the runtime
4597  * pm and the system suspend/resume code.
4598  */
4599 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4600 {
4601 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4602 	dev_priv->pm.irqs_enabled = false;
4603 #if 0
4604 	synchronize_irq(dev_priv->dev->irq);
4605 #endif
4606 }
4607 
4608 /**
4609  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4610  * @dev_priv: i915 device instance
4611  *
4612  * This function is used to enable interrupts at runtime, both in the runtime
4613  * pm and the system suspend/resume code.
4614  */
4615 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4616 {
4617 	dev_priv->pm.irqs_enabled = true;
4618 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4619 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4620 }
4621