1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2  */
3 /*
4  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  */
28 
29 #include <drm/drmP.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 
35 /**
36  * DOC: interrupt handling
37  *
38  * These functions provide the basic support for enabling and disabling
39  * interrupt handling. There's a lot more functionality in i915_irq.c
40  * and related files, but that will be described in separate chapters.
41  */
42 
43 static const u32 hpd_ilk[HPD_NUM_PINS] = {
44 	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
45 };
46 
47 static const u32 hpd_ivb[HPD_NUM_PINS] = {
48 	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
49 };
50 
51 static const u32 hpd_bdw[HPD_NUM_PINS] = {
52 	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
53 };
54 
55 static const u32 hpd_ibx[HPD_NUM_PINS] = {
56 	[HPD_CRT] = SDE_CRT_HOTPLUG,
57 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
58 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
59 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
60 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
61 };
62 
63 static const u32 hpd_cpt[HPD_NUM_PINS] = {
64 	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
65 	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
66 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
67 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
68 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
69 };
70 
71 static const u32 hpd_spt[HPD_NUM_PINS] = {
72 	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
73 	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
74 	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
75 	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
76 	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
77 };
78 
79 static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
80 	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
81 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
82 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
83 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
84 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
85 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
86 };
87 
88 static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
89 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
90 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
91 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
92 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
93 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
94 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
95 };
96 
97 static const u32 hpd_status_i915[HPD_NUM_PINS] = {
98 	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
99 	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
100 	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
101 	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
102 	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
103 	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
104 };
105 
106 /* BXT hpd list */
107 static const u32 hpd_bxt[HPD_NUM_PINS] = {
108 	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
109 	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
110 	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
111 };
112 
113 /* IIR can theoretically queue up two events. Be paranoid. */
114 #define GEN8_IRQ_RESET_NDX(type, which) do { \
115 	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
116 	POSTING_READ(GEN8_##type##_IMR(which)); \
117 	I915_WRITE(GEN8_##type##_IER(which), 0); \
118 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
119 	POSTING_READ(GEN8_##type##_IIR(which)); \
120 	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
121 	POSTING_READ(GEN8_##type##_IIR(which)); \
122 } while (0)
123 
124 #define GEN5_IRQ_RESET(type) do { \
125 	I915_WRITE(type##IMR, 0xffffffff); \
126 	POSTING_READ(type##IMR); \
127 	I915_WRITE(type##IER, 0); \
128 	I915_WRITE(type##IIR, 0xffffffff); \
129 	POSTING_READ(type##IIR); \
130 	I915_WRITE(type##IIR, 0xffffffff); \
131 	POSTING_READ(type##IIR); \
132 } while (0)
133 
134 /*
135  * We should clear IMR at preinstall/uninstall, and just check at postinstall.
136  */
137 static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv, u32 reg)
138 {
139 	u32 val = I915_READ(reg);
140 
141 	if (val == 0)
142 		return;
143 
144 	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
145 	     reg, val);
146 	I915_WRITE(reg, 0xffffffff);
147 	POSTING_READ(reg);
148 	I915_WRITE(reg, 0xffffffff);
149 	POSTING_READ(reg);
150 }
151 
152 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
153 	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
154 	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
155 	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
156 	POSTING_READ(GEN8_##type##_IMR(which)); \
157 } while (0)
158 
159 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
160 	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
161 	I915_WRITE(type##IER, (ier_val)); \
162 	I915_WRITE(type##IMR, (imr_val)); \
163 	POSTING_READ(type##IMR); \
164 } while (0)
165 
166 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
167 
168 /* For display hotplug interrupt */
169 static inline void
170 i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
171 				     uint32_t mask,
172 				     uint32_t bits)
173 {
174 	uint32_t val;
175 
176 	assert_spin_locked(&dev_priv->irq_lock);
177 	WARN_ON(bits & ~mask);
178 
179 	val = I915_READ(PORT_HOTPLUG_EN);
180 	val &= ~mask;
181 	val |= bits;
182 	I915_WRITE(PORT_HOTPLUG_EN, val);
183 }
184 
185 /**
186  * i915_hotplug_interrupt_update - update hotplug interrupt enable
187  * @dev_priv: driver private
188  * @mask: bits to update
189  * @bits: bits to enable
190  * NOTE: the HPD enable bits are modified both inside and outside
191  * of an interrupt context. To prevent read-modify-write cycles from
192  * interfering, these bits are protected by a spinlock. Since this
193  * function is usually not called from a context where the lock is
194  * held already, this function acquires the lock itself. A non-locking
195  * version is also available.
196  */
197 void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
198 				   uint32_t mask,
199 				   uint32_t bits)
200 {
201 	spin_lock_irq(&dev_priv->irq_lock);
202 	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
203 	spin_unlock_irq(&dev_priv->irq_lock);
204 }
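
/*
 * Usage sketch (hypothetical values): to enable hotplug detection for
 * port B only, a caller could do
 *
 *   i915_hotplug_interrupt_update(dev_priv, PORTB_HOTPLUG_INT_EN,
 *                                 PORTB_HOTPLUG_INT_EN);
 *
 * and pass 0 as the last argument to disable it again; bits outside
 * the given mask are never touched.
 */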
205 
206 /**
207  * ilk_update_display_irq - update DEIMR
208  * @dev_priv: driver private
209  * @interrupt_mask: mask of interrupt bits to update
210  * @enabled_irq_mask: mask of interrupt bits to enable
211  */
212 static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
213 				   uint32_t interrupt_mask,
214 				   uint32_t enabled_irq_mask)
215 {
216 	uint32_t new_val;
217 
218 	assert_spin_locked(&dev_priv->irq_lock);
219 
220 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
221 
222 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
223 		return;
224 
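	/*
	 * DEIMR uses inverted logic: a bit set to 1 masks (disables) that
	 * interrupt. The bits selected by interrupt_mask are cleared and
	 * then re-set for every interrupt not in enabled_irq_mask, so e.g.
	 * interrupt_mask=0x3 with enabled_irq_mask=0x1 unmasks bit 0 and
	 * masks bit 1 while leaving all other bits untouched.
	 */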
225 	new_val = dev_priv->irq_mask;
226 	new_val &= ~interrupt_mask;
227 	new_val |= (~enabled_irq_mask & interrupt_mask);
228 
229 	if (new_val != dev_priv->irq_mask) {
230 		dev_priv->irq_mask = new_val;
231 		I915_WRITE(DEIMR, dev_priv->irq_mask);
232 		POSTING_READ(DEIMR);
233 	}
234 }
235 
236 void
237 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
238 {
239 	ilk_update_display_irq(dev_priv, mask, mask);
240 }
241 
242 void
243 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
244 {
245 	ilk_update_display_irq(dev_priv, mask, 0);
246 }
247 
248 /**
249  * ilk_update_gt_irq - update GTIMR
250  * @dev_priv: driver private
251  * @interrupt_mask: mask of interrupt bits to update
252  * @enabled_irq_mask: mask of interrupt bits to enable
253  */
254 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
255 			      uint32_t interrupt_mask,
256 			      uint32_t enabled_irq_mask)
257 {
258 	assert_spin_locked(&dev_priv->irq_lock);
259 
260 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
261 
262 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
263 		return;
264 
265 	dev_priv->gt_irq_mask &= ~interrupt_mask;
266 	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
267 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
268 	POSTING_READ(GTIMR);
269 }
270 
271 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
272 {
273 	ilk_update_gt_irq(dev_priv, mask, mask);
274 }
275 
276 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278 	ilk_update_gt_irq(dev_priv, mask, 0);
279 }
280 
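/*
 * On gen8+ the PM/RPS interrupt bits moved into slot 2 of the GT
 * interrupt register block, so these helpers return the register
 * appropriate for the running platform.
 */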
281 static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
282 {
283 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
284 }
285 
286 static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
287 {
288 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
289 }
290 
291 static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
292 {
293 	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
294 }
295 
296 /**
297  * snb_update_pm_irq - update GEN6_PMIMR
298  * @dev_priv: driver private
299  * @interrupt_mask: mask of interrupt bits to update
300  * @enabled_irq_mask: mask of interrupt bits to enable
301  */
302 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
303 			      uint32_t interrupt_mask,
304 			      uint32_t enabled_irq_mask)
305 {
306 	uint32_t new_val;
307 
308 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
309 
310 	assert_spin_locked(&dev_priv->irq_lock);
311 
312 	new_val = dev_priv->pm_irq_mask;
313 	new_val &= ~interrupt_mask;
314 	new_val |= (~enabled_irq_mask & interrupt_mask);
315 
316 	if (new_val != dev_priv->pm_irq_mask) {
317 		dev_priv->pm_irq_mask = new_val;
318 		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
319 		POSTING_READ(gen6_pm_imr(dev_priv));
320 	}
321 }
322 
323 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
324 {
325 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
326 		return;
327 
328 	snb_update_pm_irq(dev_priv, mask, mask);
329 }
330 
331 static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
332 				  uint32_t mask)
333 {
334 	snb_update_pm_irq(dev_priv, mask, 0);
335 }
336 
337 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
338 {
339 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
340 		return;
341 
342 	__gen6_disable_pm_irq(dev_priv, mask);
343 }
344 
345 void gen6_reset_rps_interrupts(struct drm_device *dev)
346 {
347 	struct drm_i915_private *dev_priv = dev->dev_private;
348 	uint32_t reg = gen6_pm_iir(dev_priv);
349 
350 	spin_lock_irq(&dev_priv->irq_lock);
351 	I915_WRITE(reg, dev_priv->pm_rps_events);
352 	I915_WRITE(reg, dev_priv->pm_rps_events);
353 	POSTING_READ(reg);
354 	dev_priv->rps.pm_iir = 0;
355 	spin_unlock_irq(&dev_priv->irq_lock);
356 }
357 
358 void gen6_enable_rps_interrupts(struct drm_device *dev)
359 {
360 	struct drm_i915_private *dev_priv = dev->dev_private;
361 
362 	spin_lock_irq(&dev_priv->irq_lock);
363 
364 	WARN_ON(dev_priv->rps.pm_iir);
365 	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
366 	dev_priv->rps.interrupts_enabled = true;
367 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
368 				dev_priv->pm_rps_events);
369 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
370 
371 	spin_unlock_irq(&dev_priv->irq_lock);
372 }
373 
374 u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
375 {
376 	/*
377 	 * SNB,IVB can hang, and VLV,CHV may hard hang, on a looping
378 	 * batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
379 	 *
380 	 * TODO: verify if this can be reproduced on VLV,CHV.
381 	 */
382 	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
383 		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
384 
385 	if (INTEL_INFO(dev_priv)->gen >= 8)
386 		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
387 
388 	return mask;
389 }
390 
391 void gen6_disable_rps_interrupts(struct drm_device *dev)
392 {
393 	struct drm_i915_private *dev_priv = dev->dev_private;
394 
395 	spin_lock_irq(&dev_priv->irq_lock);
396 	dev_priv->rps.interrupts_enabled = false;
397 	spin_unlock_irq(&dev_priv->irq_lock);
398 
399 	cancel_work_sync(&dev_priv->rps.work);
400 
401 	spin_lock_irq(&dev_priv->irq_lock);
402 
403 	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
404 
405 	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
406 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
407 				~dev_priv->pm_rps_events);
408 
409 	spin_unlock_irq(&dev_priv->irq_lock);
410 
411 #if 0
412 	synchronize_irq(dev->irq);
413 #endif
414 }
415 
416 /**
417  * bdw_update_port_irq - update DE port interrupt
418  * @dev_priv: driver private
419  * @interrupt_mask: mask of interrupt bits to update
420  * @enabled_irq_mask: mask of interrupt bits to enable
421  */
422 static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
423 				uint32_t interrupt_mask,
424 				uint32_t enabled_irq_mask)
425 {
426 	uint32_t new_val;
427 	uint32_t old_val;
428 
429 	assert_spin_locked(&dev_priv->irq_lock);
430 
431 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
432 
433 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
434 		return;
435 
436 	old_val = I915_READ(GEN8_DE_PORT_IMR);
437 
438 	new_val = old_val;
439 	new_val &= ~interrupt_mask;
440 	new_val |= (~enabled_irq_mask & interrupt_mask);
441 
442 	if (new_val != old_val) {
443 		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
444 		POSTING_READ(GEN8_DE_PORT_IMR);
445 	}
446 }
447 
448 /**
449  * ibx_display_interrupt_update - update SDEIMR
450  * @dev_priv: driver private
451  * @interrupt_mask: mask of interrupt bits to update
452  * @enabled_irq_mask: mask of interrupt bits to enable
453  */
454 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
455 				  uint32_t interrupt_mask,
456 				  uint32_t enabled_irq_mask)
457 {
458 	uint32_t sdeimr = I915_READ(SDEIMR);
459 	sdeimr &= ~interrupt_mask;
460 	sdeimr |= (~enabled_irq_mask & interrupt_mask);
461 
462 	WARN_ON(enabled_irq_mask & ~interrupt_mask);
463 
464 	assert_spin_locked(&dev_priv->irq_lock);
465 
466 	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
467 		return;
468 
469 	I915_WRITE(SDEIMR, sdeimr);
470 	POSTING_READ(SDEIMR);
471 }
472 
473 static void
474 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
475 		       u32 enable_mask, u32 status_mask)
476 {
477 	u32 reg = PIPESTAT(pipe);
478 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
479 
480 	assert_spin_locked(&dev_priv->irq_lock);
481 	WARN_ON(!intel_irqs_enabled(dev_priv));
482 
483 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
484 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
485 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
486 		      pipe_name(pipe), enable_mask, status_mask))
487 		return;
488 
489 	if ((pipestat & enable_mask) == enable_mask)
490 		return;
491 
492 	dev_priv->pipestat_irq_mask[pipe] |= status_mask;
493 
494 	/* Enable the interrupt, clear any pending status */
495 	pipestat |= enable_mask | status_mask;
496 	I915_WRITE(reg, pipestat);
497 	POSTING_READ(reg);
498 }
499 
500 static void
501 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
502 		        u32 enable_mask, u32 status_mask)
503 {
504 	u32 reg = PIPESTAT(pipe);
505 	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
506 
507 	assert_spin_locked(&dev_priv->irq_lock);
508 	WARN_ON(!intel_irqs_enabled(dev_priv));
509 
510 	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
511 		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
512 		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
513 		      pipe_name(pipe), enable_mask, status_mask))
514 		return;
515 
516 	if ((pipestat & enable_mask) == 0)
517 		return;
518 
519 	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
520 
521 	pipestat &= ~enable_mask;
522 	I915_WRITE(reg, pipestat);
523 	POSTING_READ(reg);
524 }
525 
526 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
527 {
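	/*
	 * In PIPESTAT the enable bits live 16 bits above their matching
	 * status bits, hence the shift; the special cases below cover the
	 * bits that don't follow that pattern on VLV/CHV.
	 */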
528 	u32 enable_mask = status_mask << 16;
529 
530 	/*
531 	 * On pipe A we don't support the PSR interrupt yet,
532 	 * on pipe B and C the same bit MBZ.
533 	 */
534 	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
535 		return 0;
536 	/*
537 	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
538 	 * A the same bit is for perf counters which we don't use either.
539 	 */
540 	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
541 		return 0;
542 
543 	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
544 			 SPRITE0_FLIP_DONE_INT_EN_VLV |
545 			 SPRITE1_FLIP_DONE_INT_EN_VLV);
546 	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
547 		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
548 	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
549 		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
550 
551 	return enable_mask;
552 }
553 
554 void
555 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
556 		     u32 status_mask)
557 {
558 	u32 enable_mask;
559 
560 	if (IS_VALLEYVIEW(dev_priv->dev))
561 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
562 							   status_mask);
563 	else
564 		enable_mask = status_mask << 16;
565 	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
566 }
567 
568 void
569 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum i915_pipe pipe,
570 		      u32 status_mask)
571 {
572 	u32 enable_mask;
573 
574 	if (IS_VALLEYVIEW(dev_priv->dev))
575 		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
576 							   status_mask);
577 	else
578 		enable_mask = status_mask << 16;
579 	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
580 }
581 
582 /**
583  * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
584  * @dev: drm device
585  */
586 static void i915_enable_asle_pipestat(struct drm_device *dev)
587 {
588 	struct drm_i915_private *dev_priv = dev->dev_private;
589 
590 	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
591 		return;
592 
593 	spin_lock_irq(&dev_priv->irq_lock);
594 
595 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
596 	if (INTEL_INFO(dev)->gen >= 4)
597 		i915_enable_pipestat(dev_priv, PIPE_A,
598 				     PIPE_LEGACY_BLC_EVENT_STATUS);
599 
600 	spin_unlock_irq(&dev_priv->irq_lock);
601 }
602 
603 /*
604  * This timing diagram depicts the video signal in and
605  * around the vertical blanking period.
606  *
607  * Assumptions about the fictitious mode used in this example:
608  *  vblank_start >= 3
609  *  vsync_start = vblank_start + 1
610  *  vsync_end = vblank_start + 2
611  *  vtotal = vblank_start + 3
612  *
613  *           start of vblank:
614  *           latch double buffered registers
615  *           increment frame counter (ctg+)
616  *           generate start of vblank interrupt (gen4+)
617  *           |
618  *           |          frame start:
619  *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
620  *           |          may be shifted forward 1-3 extra lines via PIPECONF
621  *           |          |
622  *           |          |  start of vsync:
623  *           |          |  generate vsync interrupt
624  *           |          |  |
625  * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
626  *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
627  * ----va---> <-----------------vb--------------------> <--------va-------------
628  *       |          |       <----vs----->                     |
629  * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
630  * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
631  * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
632  *       |          |                                         |
633  *       last visible pixel                                   first visible pixel
634  *                  |                                         increment frame counter (gen3/4)
635  *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
636  *
637  * x  = horizontal active
638  * _  = horizontal blanking
639  * hs = horizontal sync
640  * va = vertical active
641  * vb = vertical blanking
642  * vs = vertical sync
643  * vbs = vblank_start (number)
644  *
645  * Summary:
646  * - most events happen at the start of horizontal sync
647  * - frame start happens at the start of horizontal blank, 1-4 lines
648  *   (depending on PIPECONF settings) after the start of vblank
649  * - gen3/4 pixel and frame counter are synchronized with the start
650  *   of horizontal active on the first line of vertical active
651  */
652 
653 static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
654 {
655 	/* Gen2 doesn't have a hardware frame counter */
656 	return 0;
657 }
658 
659 /* Called from drm generic code, passed a 'crtc', which
660  * we use as a pipe index
661  */
662 static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
663 {
664 	struct drm_i915_private *dev_priv = dev->dev_private;
665 	unsigned long high_frame;
666 	unsigned long low_frame;
667 	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
668 	struct intel_crtc *intel_crtc =
669 		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
670 	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;
671 
672 	htotal = mode->crtc_htotal;
673 	hsync_start = mode->crtc_hsync_start;
674 	vbl_start = mode->crtc_vblank_start;
675 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
676 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
677 
678 	/* Convert to pixel count */
679 	vbl_start *= htotal;
680 
681 	/* Start of vblank event occurs at start of hsync */
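	/*
	 * Example (hypothetical timings): with htotal=100 and
	 * hsync_start=80, the start of vblank event fires htotal -
	 * hsync_start = 20 pixels before the nominal start of the vblank
	 * line, so the pixel-count threshold is pulled back by that much.
	 */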
682 	vbl_start -= htotal - hsync_start;
683 
684 	high_frame = PIPEFRAME(pipe);
685 	low_frame = PIPEFRAMEPIXEL(pipe);
686 
687 	/*
688 	 * High & low register fields aren't synchronized, so make sure
689 	 * we get a low value that's stable across two reads of the high
690 	 * register.
691 	 */
692 	do {
693 		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
694 		low   = I915_READ(low_frame);
695 		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
696 	} while (high1 != high2);
697 
698 	high1 >>= PIPE_FRAME_HIGH_SHIFT;
699 	pixel = low & PIPE_PIXEL_MASK;
700 	low >>= PIPE_FRAME_LOW_SHIFT;
701 
702 	/*
703 	 * The frame counter increments at beginning of active.
704 	 * Cook up a vblank counter by also checking the pixel
705 	 * counter against vblank start.
706 	 */
707 	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
708 }
709 
710 static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
711 {
712 	struct drm_i915_private *dev_priv = dev->dev_private;
713 
714 	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
715 }
716 
717 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
718 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
719 
720 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
721 {
722 	struct drm_device *dev = crtc->base.dev;
723 	struct drm_i915_private *dev_priv = dev->dev_private;
724 	const struct drm_display_mode *mode = &crtc->base.hwmode;
725 	enum i915_pipe pipe = crtc->pipe;
726 	int position, vtotal;
727 
728 	vtotal = mode->crtc_vtotal;
729 	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
730 		vtotal /= 2;
731 
732 	if (IS_GEN2(dev))
733 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
734 	else
735 		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
736 
737 	/*
738 	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
739 	 * read it just before the start of vblank.  So try it again
740 	 * so we don't accidentally end up spanning a vblank frame
741 	 * increment, causing the pipe_update_end() code to squawk at us.
742 	 *
743 	 * The nature of this problem means we can't simply check the ISR
744 	 * bit and return the vblank start value; nor can we use the scanline
745 	 * debug register in the transcoder as it appears to have the same
746 	 * problem.  We may need to extend this to include other platforms,
747 	 * but so far testing only shows the problem on HSW.
748 	 */
749 	if (HAS_DDI(dev) && !position) {
750 		int i, temp;
751 
752 		for (i = 0; i < 100; i++) {
753 			udelay(1);
754 			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
755 				DSL_LINEMASK_GEN3;
756 			if (temp != position) {
757 				position = temp;
758 				break;
759 			}
760 		}
761 	}
762 
763 	/*
764 	 * See update_scanline_offset() for the details on the
765 	 * scanline_offset adjustment.
766 	 */
767 	return (position + crtc->scanline_offset) % vtotal;
768 }
769 
770 static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
771 				    unsigned int flags, int *vpos, int *hpos,
772 				    ktime_t *stime, ktime_t *etime,
773 				    const struct drm_display_mode *mode)
774 {
775 	struct drm_i915_private *dev_priv = dev->dev_private;
776 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
777 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
778 	int position;
779 	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
780 	bool in_vbl = true;
781 	int ret = 0;
782 	unsigned long irqflags;
783 
784 	if (WARN_ON(!mode->crtc_clock)) {
785 		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
786 				 "pipe %c\n", pipe_name(pipe));
787 		return 0;
788 	}
789 
790 	htotal = mode->crtc_htotal;
791 	hsync_start = mode->crtc_hsync_start;
792 	vtotal = mode->crtc_vtotal;
793 	vbl_start = mode->crtc_vblank_start;
794 	vbl_end = mode->crtc_vblank_end;
795 
796 	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
797 		vbl_start = DIV_ROUND_UP(vbl_start, 2);
798 		vbl_end /= 2;
799 		vtotal /= 2;
800 	}
801 
802 	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
803 
804 	/*
805 	 * Lock uncore.lock, as we will do multiple timing critical raw
806 	 * register reads, potentially with preemption disabled, so the
807 	 * following code must not block on uncore.lock.
808 	 */
809 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
810 
811 	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
812 
813 	/* Get optional system timestamp before query. */
814 	if (stime)
815 		*stime = ktime_get();
816 
817 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
818 		/* No obvious pixelcount register. Only query vertical
819 		 * scanout position from Display scan line register.
820 		 */
821 		position = __intel_get_crtc_scanline(intel_crtc);
822 	} else {
823 		/* Have access to pixelcount since start of frame.
824 		 * We can split this into vertical and horizontal
825 		 * scanout position.
826 		 */
827 		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
828 
829 		/* convert to pixel counts */
830 		vbl_start *= htotal;
831 		vbl_end *= htotal;
832 		vtotal *= htotal;
833 
834 		/*
835 		 * In interlaced modes, the pixel counter counts all pixels,
836 		 * so one field will have htotal more pixels. In order to avoid
837 		 * the reported position from jumping backwards when the pixel
838 		 * counter is beyond the length of the shorter field, just
839 	 * clamp the position to the length of the shorter field. This
840 		 * matches how the scanline counter based position works since
841 		 * the scanline counter doesn't count the two half lines.
842 		 */
843 		if (position >= vtotal)
844 			position = vtotal - 1;
845 
846 		/*
847 		 * Start of vblank interrupt is triggered at start of hsync,
848 		 * just prior to the first active line of vblank. However we
849 		 * consider lines to start at the leading edge of horizontal
850 		 * active. So, should we get here before we've crossed into
851 		 * the horizontal active of the first line in vblank, we would
852 	 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
853 		 * always add htotal-hsync_start to the current pixel position.
854 		 */
855 		position = (position + htotal - hsync_start) % vtotal;
856 	}
857 
858 	/* Get optional system timestamp after query. */
859 	if (etime)
860 		*etime = ktime_get();
861 
862 	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
863 
864 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
865 
866 	in_vbl = position >= vbl_start && position < vbl_end;
867 
868 	/*
869 	 * While in vblank, position will be negative
870 	 * counting up towards 0 at vbl_end. And outside
871 	 * vblank, position will be positive counting
872 	 * up since vbl_end.
873 	 */
874 	if (position >= vbl_start)
875 		position -= vbl_end;
876 	else
877 		position += vtotal - vbl_end;
878 
879 	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
880 		*vpos = position;
881 		*hpos = 0;
882 	} else {
883 		*vpos = position / htotal;
884 		*hpos = position - (*vpos * htotal);
885 	}
886 
887 	/* In vblank? */
888 	if (in_vbl)
889 		ret |= DRM_SCANOUTPOS_IN_VBLANK;
890 
891 	return ret;
892 }
893 
894 int intel_get_crtc_scanline(struct intel_crtc *crtc)
895 {
896 	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
897 	unsigned long irqflags;
898 	int position;
899 
900 	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
901 	position = __intel_get_crtc_scanline(crtc);
902 	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
903 
904 	return position;
905 }
906 
907 static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
908 			      int *max_error,
909 			      struct timeval *vblank_time,
910 			      unsigned flags)
911 {
912 	struct drm_crtc *crtc;
913 
914 	if (pipe >= INTEL_INFO(dev)->num_pipes) {
915 		DRM_ERROR("Invalid crtc %u\n", pipe);
916 		return -EINVAL;
917 	}
918 
919 	/* Get drm_crtc to timestamp: */
920 	crtc = intel_get_crtc_for_pipe(dev, pipe);
921 	if (crtc == NULL) {
922 		DRM_ERROR("Invalid crtc %u\n", pipe);
923 		return -EINVAL;
924 	}
925 
926 	if (!crtc->hwmode.crtc_clock) {
927 		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
928 		return -EBUSY;
929 	}
930 
931 	/* Helper routine in DRM core does all the work: */
932 	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
933 						     vblank_time, flags,
934 						     &crtc->hwmode);
935 }
936 
937 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
938 {
939 	struct drm_i915_private *dev_priv = dev->dev_private;
940 	u32 busy_up, busy_down, max_avg, min_avg;
941 	u8 new_delay;
942 
943 	lockmgr(&mchdev_lock, LK_EXCLUSIVE);
944 
945 	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
946 
947 	new_delay = dev_priv->ips.cur_delay;
948 
949 	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
950 	busy_up = I915_READ(RCPREVBSYTUPAVG);
951 	busy_down = I915_READ(RCPREVBSYTDNAVG);
952 	max_avg = I915_READ(RCBMAXAVG);
953 	min_avg = I915_READ(RCBMINAVG);
954 
955 	/* Handle RCS change request from hw */
956 	if (busy_up > max_avg) {
957 		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
958 			new_delay = dev_priv->ips.cur_delay - 1;
959 		if (new_delay < dev_priv->ips.max_delay)
960 			new_delay = dev_priv->ips.max_delay;
961 	} else if (busy_down < min_avg) {
962 		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
963 			new_delay = dev_priv->ips.cur_delay + 1;
964 		if (new_delay > dev_priv->ips.min_delay)
965 			new_delay = dev_priv->ips.min_delay;
966 	}
967 
968 	if (ironlake_set_drps(dev, new_delay))
969 		dev_priv->ips.cur_delay = new_delay;
970 
971 	lockmgr(&mchdev_lock, LK_RELEASE);
972 
973 	return;
974 }
975 
976 static void notify_ring(struct intel_engine_cs *ring)
977 {
978 	if (!intel_ring_initialized(ring))
979 		return;
980 
981 	trace_i915_gem_request_notify(ring);
982 
983 	wake_up_all(&ring->irq_queue);
984 }
985 
986 static void vlv_c0_read(struct drm_i915_private *dev_priv,
987 			struct intel_rps_ei *ei)
988 {
989 	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
990 	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
991 	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
992 }
993 
994 static bool vlv_c0_above(struct drm_i915_private *dev_priv,
995 			 const struct intel_rps_ei *old,
996 			 const struct intel_rps_ei *now,
997 			 int threshold)
998 {
999 	u64 time, c0;
1000 	unsigned int mul = 100;
1001 
1002 	if (old->cz_clock == 0)
1003 		return false;
1004 
1005 	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
1006 		mul <<= 8;
1007 
1008 	time = now->cz_clock - old->cz_clock;
1009 	time *= threshold * dev_priv->czclk_freq;
1010 
1011 	/* Workload can be split between render + media, e.g. SwapBuffers
1012 	 * being blitted in X after being rendered in mesa. To account for
1013 	 * this we need to combine both engines into our activity counter.
1014 	 */
1015 	c0 = now->render_c0 - old->render_c0;
1016 	c0 += now->media_c0 - old->media_c0;
1017 	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
1018 
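	/* In effect: was the combined render+media C0 residency over this
	 * evaluation interval at least 'threshold' percent of the elapsed
	 * CZ-clock time?
	 */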
1019 	return c0 >= time;
1020 }
1021 
1022 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
1023 {
1024 	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
1025 	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
1026 }
1027 
1028 static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
1029 {
1030 	struct intel_rps_ei now;
1031 	u32 events = 0;
1032 
1033 	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
1034 		return 0;
1035 
1036 	vlv_c0_read(dev_priv, &now);
1037 	if (now.cz_clock == 0)
1038 		return 0;
1039 
1040 	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
1041 		if (!vlv_c0_above(dev_priv,
1042 				  &dev_priv->rps.down_ei, &now,
1043 				  dev_priv->rps.down_threshold))
1044 			events |= GEN6_PM_RP_DOWN_THRESHOLD;
1045 		dev_priv->rps.down_ei = now;
1046 	}
1047 
1048 	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1049 		if (vlv_c0_above(dev_priv,
1050 				 &dev_priv->rps.up_ei, &now,
1051 				 dev_priv->rps.up_threshold))
1052 			events |= GEN6_PM_RP_UP_THRESHOLD;
1053 		dev_priv->rps.up_ei = now;
1054 	}
1055 
1056 	return events;
1057 }
1058 
1059 static bool any_waiters(struct drm_i915_private *dev_priv)
1060 {
1061 	struct intel_engine_cs *ring;
1062 	int i;
1063 
1064 	for_each_ring(ring, dev_priv, i)
1065 		if (ring->irq_refcount)
1066 			return true;
1067 
1068 	return false;
1069 }
1070 
1071 static void gen6_pm_rps_work(struct work_struct *work)
1072 {
1073 	struct drm_i915_private *dev_priv =
1074 		container_of(work, struct drm_i915_private, rps.work);
1075 	bool client_boost;
1076 	int new_delay, adj, min, max;
1077 	u32 pm_iir;
1078 
1079 	spin_lock_irq(&dev_priv->irq_lock);
1080 	/* Speed up work cancellation while disabling rps interrupts. */
1081 	if (!dev_priv->rps.interrupts_enabled) {
1082 		spin_unlock_irq(&dev_priv->irq_lock);
1083 		return;
1084 	}
1085 	pm_iir = dev_priv->rps.pm_iir;
1086 	dev_priv->rps.pm_iir = 0;
1087 	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1088 	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1089 	client_boost = dev_priv->rps.client_boost;
1090 	dev_priv->rps.client_boost = false;
1091 	spin_unlock_irq(&dev_priv->irq_lock);
1092 
1093 	/* Make sure we didn't queue anything we're not going to process. */
1094 	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1095 
1096 	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1097 		return;
1098 
1099 	mutex_lock(&dev_priv->rps.hw_lock);
1100 
1101 	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);
1102 
1103 	adj = dev_priv->rps.last_adj;
1104 	new_delay = dev_priv->rps.cur_freq;
1105 	min = dev_priv->rps.min_freq_softlimit;
1106 	max = dev_priv->rps.max_freq_softlimit;
1107 
1108 	if (client_boost) {
1109 		new_delay = dev_priv->rps.max_freq_softlimit;
1110 		adj = 0;
1111 	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1112 		if (adj > 0)
1113 			adj *= 2;
1114 		else /* CHV needs even encode values */
1115 			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
1116 		/*
1117 		 * For better performance, jump directly
1118 		 * to RPe if we're below it.
1119 		 */
1120 		if (new_delay < dev_priv->rps.efficient_freq - adj) {
1121 			new_delay = dev_priv->rps.efficient_freq;
1122 			adj = 0;
1123 		}
1124 	} else if (any_waiters(dev_priv)) {
1125 		adj = 0;
1126 	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1127 		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1128 			new_delay = dev_priv->rps.efficient_freq;
1129 		else
1130 			new_delay = dev_priv->rps.min_freq_softlimit;
1131 		adj = 0;
1132 	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1133 		if (adj < 0)
1134 			adj *= 2;
1135 		else /* CHV needs even encode values */
1136 			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
1137 	} else { /* unknown event */
1138 		adj = 0;
1139 	}
1140 
1141 	dev_priv->rps.last_adj = adj;
1142 
1143 	/* sysfs frequency interfaces may have snuck in while servicing the
1144 	 * interrupt
1145 	 */
1146 	new_delay += adj;
1147 	new_delay = clamp_t(int, new_delay, min, max);
1148 
1149 	intel_set_rps(dev_priv->dev, new_delay);
1150 
1151 	mutex_unlock(&dev_priv->rps.hw_lock);
1152 }
1153 
1154 
1155 /**
1156  * ivybridge_parity_work - Workqueue called when a parity error interrupt
1157  * occurred.
1158  * @work: workqueue struct
1159  *
1160  * Doesn't actually do anything except notify userspace. As a consequence of
1161  * this event, userspace should try to remap the bad rows, since statistically
1162  * the same row is more likely to go bad again.
1163  */
1164 static void ivybridge_parity_work(struct work_struct *work)
1165 {
1166 	struct drm_i915_private *dev_priv =
1167 		container_of(work, struct drm_i915_private, l3_parity.error_work);
1168 	u32 error_status, row, bank, subbank;
1169 	char *parity_event[6];
1170 	uint32_t misccpctl;
1171 	uint8_t slice = 0;
1172 
1173 	/* We must turn off DOP level clock gating to access the L3 registers.
1174 	 * In order to prevent a get/put style interface, acquire struct mutex
1175 	 * any time we access those registers.
1176 	 */
1177 	mutex_lock(&dev_priv->dev->struct_mutex);
1178 
1179 	/* If we've screwed up tracking, just let the interrupt fire again */
1180 	if (WARN_ON(!dev_priv->l3_parity.which_slice))
1181 		goto out;
1182 
1183 	misccpctl = I915_READ(GEN7_MISCCPCTL);
1184 	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1185 	POSTING_READ(GEN7_MISCCPCTL);
1186 
1187 	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1188 		u32 reg;
1189 
1190 		slice--;
1191 		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1192 			break;
1193 
1194 		dev_priv->l3_parity.which_slice &= ~(1<<slice);
1195 
1196 		reg = GEN7_L3CDERRST1 + (slice * 0x200);
1197 
1198 		error_status = I915_READ(reg);
1199 		row = GEN7_PARITY_ERROR_ROW(error_status);
1200 		bank = GEN7_PARITY_ERROR_BANK(error_status);
1201 		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1202 
1203 		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1204 		POSTING_READ(reg);
1205 
1206 		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1207 		parity_event[1] = drm_asprintf(GFP_KERNEL, "ROW=%d", row);
1208 		parity_event[2] = drm_asprintf(GFP_KERNEL, "BANK=%d", bank);
1209 		parity_event[3] = drm_asprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1210 		parity_event[4] = drm_asprintf(GFP_KERNEL, "SLICE=%d", slice);
1211 		parity_event[5] = NULL;
1212 
1213 #if 0
1214 		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1215 				   KOBJ_CHANGE, parity_event);
1216 #endif
1217 
1218 		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1219 			  slice, row, bank, subbank);
1220 
1221 		kfree(parity_event[4]);
1222 		kfree(parity_event[3]);
1223 		kfree(parity_event[2]);
1224 		kfree(parity_event[1]);
1225 	}
1226 
1227 	I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1228 
1229 out:
1230 	WARN_ON(dev_priv->l3_parity.which_slice);
1231 	spin_lock_irq(&dev_priv->irq_lock);
1232 	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1233 	spin_unlock_irq(&dev_priv->irq_lock);
1234 
1235 	mutex_unlock(&dev_priv->dev->struct_mutex);
1236 }
1237 
1238 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1239 {
1240 	struct drm_i915_private *dev_priv = dev->dev_private;
1241 
1242 	if (!HAS_L3_DPF(dev))
1243 		return;
1244 
1245 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1246 	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1247 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1248 
1249 	iir &= GT_PARITY_ERROR(dev);
1250 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1251 		dev_priv->l3_parity.which_slice |= 1 << 1;
1252 
1253 	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1254 		dev_priv->l3_parity.which_slice |= 1 << 0;
1255 
1256 	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1257 }
1258 
1259 static void ilk_gt_irq_handler(struct drm_device *dev,
1260 			       struct drm_i915_private *dev_priv,
1261 			       u32 gt_iir)
1262 {
1263 	if (gt_iir &
1264 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1265 		notify_ring(&dev_priv->ring[RCS]);
1266 	if (gt_iir & ILK_BSD_USER_INTERRUPT)
1267 		notify_ring(&dev_priv->ring[VCS]);
1268 }
1269 
1270 static void snb_gt_irq_handler(struct drm_device *dev,
1271 			       struct drm_i915_private *dev_priv,
1272 			       u32 gt_iir)
1273 {
1274 
1275 	if (gt_iir &
1276 	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1277 		notify_ring(&dev_priv->ring[RCS]);
1278 	if (gt_iir & GT_BSD_USER_INTERRUPT)
1279 		notify_ring(&dev_priv->ring[VCS]);
1280 	if (gt_iir & GT_BLT_USER_INTERRUPT)
1281 		notify_ring(&dev_priv->ring[BCS]);
1282 
1283 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1284 		      GT_BSD_CS_ERROR_INTERRUPT |
1285 		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1286 		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1287 
1288 	if (gt_iir & GT_PARITY_ERROR(dev))
1289 		ivybridge_parity_error_irq_handler(dev, gt_iir);
1290 }
1291 
1292 static irqreturn_t gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
1293 				       u32 master_ctl)
1294 {
1295 
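	/*
	 * Gen8 GT interrupts are spread over four IIR banks:
	 * 0 = render + blitter, 1 = the two video decode engines,
	 * 2 = PM/RPS, 3 = video enhancement.
	 */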
1296 	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1297 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(0));
1298 		if (tmp) {
1299 			I915_WRITE_FW(GEN8_GT_IIR(0), tmp);
1300 
1301 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1302 				intel_lrc_irq_handler(&dev_priv->ring[RCS]);
1303 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT))
1304 				notify_ring(&dev_priv->ring[RCS]);
1305 
1306 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1307 				intel_lrc_irq_handler(&dev_priv->ring[BCS]);
1308 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT))
1309 				notify_ring(&dev_priv->ring[BCS]);
1310 		} else
1311 			DRM_ERROR("The master control interrupt lied (GT0)!\n");
1312 	}
1313 
1314 	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1315 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(1));
1316 		if (tmp) {
1317 			I915_WRITE_FW(GEN8_GT_IIR(1), tmp);
1318 
1319 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1320 				intel_lrc_irq_handler(&dev_priv->ring[VCS]);
1321 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT))
1322 				notify_ring(&dev_priv->ring[VCS]);
1323 
1324 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1325 				intel_lrc_irq_handler(&dev_priv->ring[VCS2]);
1326 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT))
1327 				notify_ring(&dev_priv->ring[VCS2]);
1328 		} else
1329 			DRM_ERROR("The master control interrupt lied (GT1)!\n");
1330 	}
1331 
1332 	if (master_ctl & GEN8_GT_VECS_IRQ) {
1333 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(3));
1334 		if (tmp) {
1335 			I915_WRITE_FW(GEN8_GT_IIR(3), tmp);
1336 
1337 			if (tmp & (GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1338 				intel_lrc_irq_handler(&dev_priv->ring[VECS]);
1339 			if (tmp & (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT))
1340 				notify_ring(&dev_priv->ring[VECS]);
1341 		} else
1342 			DRM_ERROR("The master control interrupt lied (GT3)!\n");
1343 	}
1344 
1345 	if (master_ctl & GEN8_GT_PM_IRQ) {
1346 		u32 tmp = I915_READ_FW(GEN8_GT_IIR(2));
1347 		if (tmp & dev_priv->pm_rps_events) {
1348 			I915_WRITE_FW(GEN8_GT_IIR(2),
1349 				      tmp & dev_priv->pm_rps_events);
1350 			gen6_rps_irq_handler(dev_priv, tmp);
1351 		} else
1352 			DRM_ERROR("The master control interrupt lied (PM)!\n");
1353 	}
1354 
1355 }
1356 
1357 static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
1358 {
1359 	switch (port) {
1360 	case PORT_A:
1361 		return val & PORTA_HOTPLUG_LONG_DETECT;
1362 	case PORT_B:
1363 		return val & PORTB_HOTPLUG_LONG_DETECT;
1364 	case PORT_C:
1365 		return val & PORTC_HOTPLUG_LONG_DETECT;
1366 	default:
1367 		return false;
1368 	}
1369 }
1370 
1371 static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
1372 {
1373 	switch (port) {
1374 	case PORT_E:
1375 		return val & PORTE_HOTPLUG_LONG_DETECT;
1376 	default:
1377 		return false;
1378 	}
1379 }
1380 
1381 static bool spt_port_hotplug_long_detect(enum port port, u32 val)
1382 {
1383 	switch (port) {
1384 	case PORT_A:
1385 		return val & PORTA_HOTPLUG_LONG_DETECT;
1386 	case PORT_B:
1387 		return val & PORTB_HOTPLUG_LONG_DETECT;
1388 	case PORT_C:
1389 		return val & PORTC_HOTPLUG_LONG_DETECT;
1390 	case PORT_D:
1391 		return val & PORTD_HOTPLUG_LONG_DETECT;
1392 	default:
1393 		return false;
1394 	}
1395 }
1396 
1397 static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
1398 {
1399 	switch (port) {
1400 	case PORT_A:
1401 		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
1402 	default:
1403 		return false;
1404 	}
1405 }
1406 
1407 static bool pch_port_hotplug_long_detect(enum port port, u32 val)
1408 {
1409 	switch (port) {
1410 	case PORT_B:
1411 		return val & PORTB_HOTPLUG_LONG_DETECT;
1412 	case PORT_C:
1413 		return val & PORTC_HOTPLUG_LONG_DETECT;
1414 	case PORT_D:
1415 		return val & PORTD_HOTPLUG_LONG_DETECT;
1416 	default:
1417 		return false;
1418 	}
1419 }
1420 
1421 static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
1422 {
1423 	switch (port) {
1424 	case PORT_B:
1425 		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
1426 	case PORT_C:
1427 		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
1428 	case PORT_D:
1429 		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
1430 	default:
1431 		return false;
1432 	}
1433 }
1434 
1435 /*
1436  * Get a bit mask of pins that have triggered, and which ones may be long.
1437  * This can be called multiple times with the same masks to accumulate
1438  * hotplug detection results from several registers.
1439  *
1440  * Note that the caller is expected to zero out the masks initially.
1441  */
1442 static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1443 			     u32 hotplug_trigger, u32 dig_hotplug_reg,
1444 			     const u32 hpd[HPD_NUM_PINS],
1445 			     bool long_pulse_detect(enum port port, u32 val))
1446 {
1447 	enum port port;
1448 	int i;
1449 
1450 	for_each_hpd_pin(i) {
1451 		if ((hpd[i] & hotplug_trigger) == 0)
1452 			continue;
1453 
1454 		*pin_mask |= BIT(i);
1455 
1456 		if (!intel_hpd_pin_to_port(i, &port))
1457 			continue;
1458 
1459 		if (long_pulse_detect(port, dig_hotplug_reg))
1460 			*long_mask |= BIT(i);
1461 	}
1462 
1463 	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
1464 			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
1465 
1466 }
1467 
1468 static void gmbus_irq_handler(struct drm_device *dev)
1469 {
1470 	struct drm_i915_private *dev_priv = dev->dev_private;
1471 
1472 	wake_up_all(&dev_priv->gmbus_wait_queue);
1473 }
1474 
1475 static void dp_aux_irq_handler(struct drm_device *dev)
1476 {
1477 	struct drm_i915_private *dev_priv = dev->dev_private;
1478 
1479 	wake_up_all(&dev_priv->gmbus_wait_queue);
1480 }
1481 
1482 #if defined(CONFIG_DEBUG_FS)
1483 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1484 					 uint32_t crc0, uint32_t crc1,
1485 					 uint32_t crc2, uint32_t crc3,
1486 					 uint32_t crc4)
1487 {
1488 	struct drm_i915_private *dev_priv = dev->dev_private;
1489 	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1490 	struct intel_pipe_crc_entry *entry;
1491 	int head, tail;
1492 
1493 	spin_lock(&pipe_crc->lock);
1494 
1495 	if (!pipe_crc->entries) {
1496 		spin_unlock(&pipe_crc->lock);
1497 		DRM_DEBUG_KMS("spurious interrupt\n");
1498 		return;
1499 	}
1500 
1501 	head = pipe_crc->head;
1502 	tail = pipe_crc->tail;
1503 
1504 	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1505 		spin_unlock(&pipe_crc->lock);
1506 		DRM_ERROR("CRC buffer overflowing\n");
1507 		return;
1508 	}
1509 
1510 	entry = &pipe_crc->entries[head];
1511 
1512 	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1513 	entry->crc[0] = crc0;
1514 	entry->crc[1] = crc1;
1515 	entry->crc[2] = crc2;
1516 	entry->crc[3] = crc3;
1517 	entry->crc[4] = crc4;
1518 
1519 	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1520 	pipe_crc->head = head;
1521 
1522 	spin_unlock(&pipe_crc->lock);
1523 
1524 	wake_up_interruptible(&pipe_crc->wq);
1525 }
1526 #else
1527 static inline void
1528 display_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe,
1529 			     uint32_t crc0, uint32_t crc1,
1530 			     uint32_t crc2, uint32_t crc3,
1531 			     uint32_t crc4) {}
1532 #endif
1533 
1534 
1535 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1536 {
1537 	struct drm_i915_private *dev_priv = dev->dev_private;
1538 
1539 	display_pipe_crc_irq_handler(dev, pipe,
1540 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1541 				     0, 0, 0, 0);
1542 }
1543 
1544 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1545 {
1546 	struct drm_i915_private *dev_priv = dev->dev_private;
1547 
1548 	display_pipe_crc_irq_handler(dev, pipe,
1549 				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1550 				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1551 				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1552 				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1553 				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1554 }
1555 
1556 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum i915_pipe pipe)
1557 {
1558 	struct drm_i915_private *dev_priv = dev->dev_private;
1559 	uint32_t res1, res2;
1560 
1561 	if (INTEL_INFO(dev)->gen >= 3)
1562 		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1563 	else
1564 		res1 = 0;
1565 
1566 	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1567 		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1568 	else
1569 		res2 = 0;
1570 
1571 	display_pipe_crc_irq_handler(dev, pipe,
1572 				     I915_READ(PIPE_CRC_RES_RED(pipe)),
1573 				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1574 				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1575 				     res1, res2);
1576 }
1577 
1578 /* The RPS events need forcewake, so we add them to a work queue and mask their
1579  * IMR bits until the work is done. Other interrupts can be processed without
1580  * the work queue. */
1581 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1582 {
1583 	if (pm_iir & dev_priv->pm_rps_events) {
1584 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1585 		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1586 		if (dev_priv->rps.interrupts_enabled) {
1587 			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1588 			queue_work(dev_priv->wq, &dev_priv->rps.work);
1589 		}
1590 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1591 	}
1592 
1593 	if (INTEL_INFO(dev_priv)->gen >= 8)
1594 		return;
1595 
1596 	if (HAS_VEBOX(dev_priv->dev)) {
1597 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1598 			notify_ring(&dev_priv->ring[VECS]);
1599 
1600 		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
1601 			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
1602 	}
1603 }
1604 
1605 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum i915_pipe pipe)
1606 {
1607 	if (!drm_handle_vblank(dev, pipe))
1608 		return false;
1609 
1610 	return true;
1611 }
1612 
1613 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1614 {
1615 	struct drm_i915_private *dev_priv = dev->dev_private;
1616 	u32 pipe_stats[I915_MAX_PIPES] = { };
1617 	int pipe;
1618 
1619 	lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
1620 	for_each_pipe(dev_priv, pipe) {
1621 		int reg;
1622 		u32 mask, iir_bit = 0;
1623 
1624 		/*
1625 		 * PIPESTAT bits get signalled even when the interrupt is
1626 		 * disabled with the mask bits, and some of the status bits do
1627 		 * not generate interrupts at all (like the underrun bit). Hence
1628 		 * we need to be careful that we only handle what we want to
1629 		 * handle.
1630 		 */
1631 
1632 		/* fifo underruns are filtered in the underrun handler. */
1633 		mask = PIPE_FIFO_UNDERRUN_STATUS;
1634 
1635 		switch (pipe) {
1636 		case PIPE_A:
1637 			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1638 			break;
1639 		case PIPE_B:
1640 			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1641 			break;
1642 		case PIPE_C:
1643 			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1644 			break;
1645 		}
1646 		if (iir & iir_bit)
1647 			mask |= dev_priv->pipestat_irq_mask[pipe];
1648 
1649 		if (!mask)
1650 			continue;
1651 
1652 		reg = PIPESTAT(pipe);
1653 		mask |= PIPESTAT_INT_ENABLE_MASK;
1654 		pipe_stats[pipe] = I915_READ(reg) & mask;
1655 
1656 		/*
1657 		 * Clear the PIPE*STAT regs before the IIR
1658 		 */
1659 		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1660 					PIPESTAT_INT_STATUS_MASK))
1661 			I915_WRITE(reg, pipe_stats[pipe]);
1662 	}
1663 	lockmgr(&dev_priv->irq_lock, LK_RELEASE);
1664 
1665 	for_each_pipe(dev_priv, pipe) {
1666 		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1667 		    intel_pipe_handle_vblank(dev, pipe))
1668 			intel_check_page_flip(dev, pipe);
1669 
1670 		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1671 			intel_prepare_page_flip(dev, pipe);
1672 			intel_finish_page_flip(dev, pipe);
1673 		}
1674 
1675 		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1676 			i9xx_pipe_crc_irq_handler(dev, pipe);
1677 
1678 		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1679 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1680 	}
1681 
1682 	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1683 		gmbus_irq_handler(dev);
1684 }
1685 
1686 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1687 {
1688 	struct drm_i915_private *dev_priv = dev->dev_private;
1689 	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1690 	u32 pin_mask = 0, long_mask = 0;
1691 
1692 	if (!hotplug_status)
1693 		return;
1694 
1695 	I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1696 	/*
1697 	 * Make sure hotplug status is cleared before we clear IIR, or else we
1698 	 * may miss hotplug events.
1699 	 */
1700 	POSTING_READ(PORT_HOTPLUG_STAT);
1701 
1702 	if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
1703 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1704 
1705 		if (hotplug_trigger) {
1706 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1707 					   hotplug_trigger, hpd_status_g4x,
1708 					   i9xx_port_hotplug_long_detect);
1709 
1710 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1711 		}
1712 
1713 		if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1714 			dp_aux_irq_handler(dev);
1715 	} else {
1716 		u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1717 
1718 		if (hotplug_trigger) {
1719 			intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1720 					   hotplug_trigger, hpd_status_i915,
1721 					   i9xx_port_hotplug_long_detect);
1722 			intel_hpd_irq_handler(dev, pin_mask, long_mask);
1723 		}
1724 	}
1725 }
1726 
1727 static irqreturn_t valleyview_irq_handler(void *arg)
1728 {
1729 	struct drm_device *dev = arg;
1730 	struct drm_i915_private *dev_priv = dev->dev_private;
1731 	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
1732 
1733 	if (!intel_irqs_enabled(dev_priv))
1734 		return IRQ_NONE;
1735 
1736 	while (true) {
1737 		/* Find, clear, then process each source of interrupt */
1738 
1739 		gt_iir = I915_READ(GTIIR);
1740 		if (gt_iir)
1741 			I915_WRITE(GTIIR, gt_iir);
1742 
1743 		pm_iir = I915_READ(GEN6_PMIIR);
1744 		if (pm_iir)
1745 			I915_WRITE(GEN6_PMIIR, pm_iir);
1746 
1747 		iir = I915_READ(VLV_IIR);
1748 		if (iir) {
1749 			/* Consume port before clearing IIR or we'll miss events */
1750 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1751 				i9xx_hpd_irq_handler(dev);
1752 			I915_WRITE(VLV_IIR, iir);
1753 		}
1754 
1755 		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1756 			goto out;
1757 
		ret = IRQ_HANDLED;
1758 
1759 		if (gt_iir)
1760 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
1761 		if (pm_iir)
1762 			gen6_rps_irq_handler(dev_priv, pm_iir);
1763 		/* Call regardless, as some status bits might not be
1764 		 * signalled in iir */
1765 		valleyview_pipestat_irq_handler(dev, iir);
1766 	}
1767 
1768 out:
1769 	return ret;
1770 }
1771 
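/*
 * CHV combines the gen8-style master control and GT interrupt layout with
 * the VLV display IIR: the master interrupt is disabled while the pending
 * sources are processed and re-enabled (with a posting read) at the end of
 * each pass.
 */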
1772 static irqreturn_t cherryview_irq_handler(void *arg)
1773 {
1774 	struct drm_device *dev = arg;
1775 	struct drm_i915_private *dev_priv = dev->dev_private;
1776 	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;
1777 
1778 	if (!intel_irqs_enabled(dev_priv))
1779 		return IRQ_NONE;
1780 
1781 	for (;;) {
1782 		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1783 		iir = I915_READ(VLV_IIR);
1784 
1785 		if (master_ctl == 0 && iir == 0)
1786 			break;
1787 
		ret = IRQ_HANDLED;
1788 
1789 		I915_WRITE(GEN8_MASTER_IRQ, 0);
1790 
1791 		/* Find, clear, then process each source of interrupt */
1792 
1793 		if (iir) {
1794 			/* Consume port before clearing IIR or we'll miss events */
1795 			if (iir & I915_DISPLAY_PORT_INTERRUPT)
1796 				i9xx_hpd_irq_handler(dev);
1797 			I915_WRITE(VLV_IIR, iir);
1798 		}
1799 
1800 		gen8_gt_irq_handler(dev_priv, master_ctl);
1801 
1802 		/* Call regardless, as some status bits might not be
1803 		 * signalled in iir */
1804 		valleyview_pipestat_irq_handler(dev, iir);
1805 
1806 		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1807 		POSTING_READ(GEN8_MASTER_IRQ);
1808 	}
1809 
	return ret;
1810 }
1811 
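/*
 * PCH (IBX/CPT) hotplug: the trigger bits come from SDEIIR, but the
 * long-vs-short pulse information lives in PCH_PORT_HOTPLUG, which is read
 * back and written to clear it before the pins are decoded.
 */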
1812 static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1813 				const u32 hpd[HPD_NUM_PINS])
1814 {
1815 	struct drm_i915_private *dev_priv = to_i915(dev);
1816 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1817 
1818 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1819 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1820 
1821 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1822 			   dig_hotplug_reg, hpd,
1823 			   pch_port_hotplug_long_detect);
1824 
1825 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
1826 }
1827 
1828 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1829 {
1830 	struct drm_i915_private *dev_priv = dev->dev_private;
1831 	int pipe;
1832 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1833 
1834 	if (hotplug_trigger)
1835 		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);
1836 
1837 	if (pch_iir & SDE_AUDIO_POWER_MASK) {
1838 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1839 			       SDE_AUDIO_POWER_SHIFT);
1840 		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1841 				 port_name(port));
1842 	}
1843 
1844 	if (pch_iir & SDE_AUX_MASK)
1845 		dp_aux_irq_handler(dev);
1846 
1847 	if (pch_iir & SDE_GMBUS)
1848 		gmbus_irq_handler(dev);
1849 
1850 	if (pch_iir & SDE_AUDIO_HDCP_MASK)
1851 		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1852 
1853 	if (pch_iir & SDE_AUDIO_TRANS_MASK)
1854 		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1855 
1856 	if (pch_iir & SDE_POISON)
1857 		DRM_ERROR("PCH poison interrupt\n");
1858 
1859 	if (pch_iir & SDE_FDI_MASK)
1860 		for_each_pipe(dev_priv, pipe)
1861 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1862 					 pipe_name(pipe),
1863 					 I915_READ(FDI_RX_IIR(pipe)));
1864 
1865 	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1866 		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1867 
1868 	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1869 		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1870 
1871 	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1872 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1873 
1874 	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1875 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1876 }
1877 
1878 static void ivb_err_int_handler(struct drm_device *dev)
1879 {
1880 	struct drm_i915_private *dev_priv = dev->dev_private;
1881 	u32 err_int = I915_READ(GEN7_ERR_INT);
1882 	enum i915_pipe pipe;
1883 
1884 	if (err_int & ERR_INT_POISON)
1885 		DRM_ERROR("Poison interrupt\n");
1886 
1887 	for_each_pipe(dev_priv, pipe) {
1888 		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1889 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1890 
1891 		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1892 			if (IS_IVYBRIDGE(dev))
1893 				ivb_pipe_crc_irq_handler(dev, pipe);
1894 			else
1895 				hsw_pipe_crc_irq_handler(dev, pipe);
1896 		}
1897 	}
1898 
1899 	I915_WRITE(GEN7_ERR_INT, err_int);
1900 }
1901 
1902 static void cpt_serr_int_handler(struct drm_device *dev)
1903 {
1904 	struct drm_i915_private *dev_priv = dev->dev_private;
1905 	u32 serr_int = I915_READ(SERR_INT);
1906 
1907 	if (serr_int & SERR_INT_POISON)
1908 		DRM_ERROR("PCH poison interrupt\n");
1909 
1910 	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
1911 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1912 
1913 	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
1914 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1915 
1916 	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
1917 		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
1918 
1919 	I915_WRITE(SERR_INT, serr_int);
1920 }
1921 
1922 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
1923 {
1924 	struct drm_i915_private *dev_priv = dev->dev_private;
1925 	int pipe;
1926 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
1927 
1928 	if (hotplug_trigger)
1929 		ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);
1930 
1931 	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
1932 		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
1933 			       SDE_AUDIO_POWER_SHIFT_CPT);
1934 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1935 				 port_name(port));
1936 	}
1937 
1938 	if (pch_iir & SDE_AUX_MASK_CPT)
1939 		dp_aux_irq_handler(dev);
1940 
1941 	if (pch_iir & SDE_GMBUS_CPT)
1942 		gmbus_irq_handler(dev);
1943 
1944 	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
1945 		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
1946 
1947 	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
1948 		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
1949 
1950 	if (pch_iir & SDE_FDI_MASK_CPT)
1951 		for_each_pipe(dev_priv, pipe)
1952 			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
1953 					 pipe_name(pipe),
1954 					 I915_READ(FDI_RX_IIR(pipe)));
1955 
1956 	if (pch_iir & SDE_ERROR_CPT)
1957 		cpt_serr_int_handler(dev);
1958 }
1959 
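/*
 * SPT splits the digital hotplug status across PCH_PORT_HOTPLUG (ports A-D)
 * and PCH_PORT_HOTPLUG2 (port E); both are folded into a single pin/long
 * mask before one call to intel_hpd_irq_handler().
 */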
1960 static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
1961 {
1962 	struct drm_i915_private *dev_priv = dev->dev_private;
1963 	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
1964 		~SDE_PORTE_HOTPLUG_SPT;
1965 	u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
1966 	u32 pin_mask = 0, long_mask = 0;
1967 
1968 	if (hotplug_trigger) {
1969 		u32 dig_hotplug_reg;
1970 
1971 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1972 		I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1973 
1974 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1975 				   dig_hotplug_reg, hpd_spt,
1976 				   spt_port_hotplug_long_detect);
1977 	}
1978 
1979 	if (hotplug2_trigger) {
1980 		u32 dig_hotplug_reg;
1981 
1982 		dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
1983 		I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
1984 
1985 		intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
1986 				   dig_hotplug_reg, hpd_spt,
1987 				   spt_port_hotplug2_long_detect);
1988 	}
1989 
1990 	if (pin_mask)
1991 		intel_hpd_irq_handler(dev, pin_mask, long_mask);
1992 
1993 	if (pch_iir & SDE_GMBUS_CPT)
1994 		gmbus_irq_handler(dev);
1995 }
1996 
1997 static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1998 				const u32 hpd[HPD_NUM_PINS])
1999 {
2000 	struct drm_i915_private *dev_priv = to_i915(dev);
2001 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2002 
2003 	dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2004 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2005 
2006 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2007 			   dig_hotplug_reg, hpd,
2008 			   ilk_port_hotplug_long_detect);
2009 
2010 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2011 }
2012 
2013 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2014 {
2015 	struct drm_i915_private *dev_priv = dev->dev_private;
2016 	enum i915_pipe pipe;
2017 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2018 
2019 	if (hotplug_trigger)
2020 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk);
2021 
2022 	if (de_iir & DE_AUX_CHANNEL_A)
2023 		dp_aux_irq_handler(dev);
2024 
2025 	if (de_iir & DE_GSE)
2026 		intel_opregion_asle_intr(dev);
2027 
2028 	if (de_iir & DE_POISON)
2029 		DRM_ERROR("Poison interrupt\n");
2030 
2031 	for_each_pipe(dev_priv, pipe) {
2032 		if (de_iir & DE_PIPE_VBLANK(pipe) &&
2033 		    intel_pipe_handle_vblank(dev, pipe))
2034 			intel_check_page_flip(dev, pipe);
2035 
2036 		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2037 			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2038 
2039 		if (de_iir & DE_PIPE_CRC_DONE(pipe))
2040 			i9xx_pipe_crc_irq_handler(dev, pipe);
2041 
2042 		/* plane/pipes map 1:1 on ilk+ */
2043 		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2044 			intel_prepare_page_flip(dev, pipe);
2045 			intel_finish_page_flip_plane(dev, pipe);
2046 		}
2047 	}
2048 
2049 	/* check event from PCH */
2050 	if (de_iir & DE_PCH_EVENT) {
2051 		u32 pch_iir = I915_READ(SDEIIR);
2052 
2053 		if (HAS_PCH_CPT(dev))
2054 			cpt_irq_handler(dev, pch_iir);
2055 		else
2056 			ibx_irq_handler(dev, pch_iir);
2057 
2058 		/* should clear PCH hotplug event before clear CPU irq */
2059 		I915_WRITE(SDEIIR, pch_iir);
2060 	}
2061 
2062 	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2063 		ironlake_rps_change_irq_handler(dev);
2064 }
2065 
2066 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2067 {
2068 	struct drm_i915_private *dev_priv = dev->dev_private;
2069 	enum i915_pipe pipe;
2070 	u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2071 
2072 	if (hotplug_trigger)
2073 		ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb);
2074 
2075 	if (de_iir & DE_ERR_INT_IVB)
2076 		ivb_err_int_handler(dev);
2077 
2078 	if (de_iir & DE_AUX_CHANNEL_A_IVB)
2079 		dp_aux_irq_handler(dev);
2080 
2081 	if (de_iir & DE_GSE_IVB)
2082 		intel_opregion_asle_intr(dev);
2083 
2084 	for_each_pipe(dev_priv, pipe) {
2085 		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2086 		    intel_pipe_handle_vblank(dev, pipe))
2087 			intel_check_page_flip(dev, pipe);
2088 
2089 		/* plane/pipes map 1:1 on ilk+ */
2090 		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2091 			intel_prepare_page_flip(dev, pipe);
2092 			intel_finish_page_flip_plane(dev, pipe);
2093 		}
2094 	}
2095 
2096 	/* check event from PCH */
2097 	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2098 		u32 pch_iir = I915_READ(SDEIIR);
2099 
2100 		cpt_irq_handler(dev, pch_iir);
2101 
2102 		/* clear PCH hotplug event before clear CPU irq */
2103 		I915_WRITE(SDEIIR, pch_iir);
2104 	}
2105 }
2106 
2107 /*
2108  * To handle irqs with the minimum potential races with fresh interrupts, we:
2109  * 1 - Disable Master Interrupt Control.
2110  * 2 - Find the source(s) of the interrupt.
2111  * 3 - Clear the Interrupt Identity bits (IIR).
2112  * 4 - Process the interrupt(s) that had bits set in the IIRs.
2113  * 5 - Re-enable Master Interrupt Control.
2114  */
2115 static irqreturn_t ironlake_irq_handler(void *arg)
2116 {
2117 	struct drm_device *dev = arg;
2118 	struct drm_i915_private *dev_priv = dev->dev_private;
2119 	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
2120 
2121 	if (!intel_irqs_enabled(dev_priv))
2122 		return IRQ_NONE;
2123 
2124 	/* We get interrupts on unclaimed registers, so check for this before we
2125 	 * do any I915_{READ,WRITE}. */
2126 	intel_uncore_check_errors(dev);
2127 
2128 	/* disable master interrupt before clearing iir  */
2129 	de_ier = I915_READ(DEIER);
2130 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2131 	POSTING_READ(DEIER);
2132 
2133 	/* Disable south interrupts. We'll only write to SDEIIR once, so further
2134 	 * interrupts will be stored on its back queue, and then we'll be
2135 	 * able to process them after we restore SDEIER (as soon as we restore
2136 	 * it, we'll get an interrupt if SDEIIR still has something to process
2137 	 * due to its back queue). */
2138 	if (!HAS_PCH_NOP(dev)) {
2139 		sde_ier = I915_READ(SDEIER);
2140 		I915_WRITE(SDEIER, 0);
2141 		POSTING_READ(SDEIER);
2142 	}
2143 
2144 	/* Find, clear, then process each source of interrupt */
2145 
2146 	gt_iir = I915_READ(GTIIR);
2147 	if (gt_iir) {
2148 		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
2149 		if (INTEL_INFO(dev)->gen >= 6)
2150 			snb_gt_irq_handler(dev, dev_priv, gt_iir);
2151 		else
2152 			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2153 	}
2154 
2155 	de_iir = I915_READ(DEIIR);
2156 	if (de_iir) {
2157 		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
2158 		if (INTEL_INFO(dev)->gen >= 7)
2159 			ivb_display_irq_handler(dev, de_iir);
2160 		else
2161 			ilk_display_irq_handler(dev, de_iir);
2162 	}
2163 
2164 	if (INTEL_INFO(dev)->gen >= 6) {
2165 		u32 pm_iir = I915_READ(GEN6_PMIIR);
2166 		if (pm_iir) {
2167 			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
2168 			gen6_rps_irq_handler(dev_priv, pm_iir);
2169 		}
2170 	}
2171 
2172 	I915_WRITE(DEIER, de_ier);
2173 	POSTING_READ(DEIER);
2174 	if (!HAS_PCH_NOP(dev)) {
2175 		I915_WRITE(SDEIER, sde_ier);
2176 		POSTING_READ(SDEIER);
2177 	}
2178 
	return ret;
2179 }
2180 
2181 static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2182 				const u32 hpd[HPD_NUM_PINS])
2183 {
2184 	struct drm_i915_private *dev_priv = to_i915(dev);
2185 	u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2186 
2187 	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2188 	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2189 
2190 	intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2191 			   dig_hotplug_reg, hpd,
2192 			   bxt_port_hotplug_long_detect);
2193 
2194 	intel_hpd_irq_handler(dev, pin_mask, long_mask);
2195 }
2196 
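/*
 * BDW+ top-level handler: the master control register is accessed with the
 * raw _FW variants (no forcewake bookkeeping), zeroed while the GT, DE misc,
 * DE port, per-pipe and PCH IIRs are processed, and re-enabled at the end.
 */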
2197 static irqreturn_t gen8_irq_handler(void *arg)
2198 {
2199 	struct drm_device *dev = arg;
2200 	struct drm_i915_private *dev_priv = dev->dev_private;
2201 	u32 master_ctl;
2202 	uint32_t tmp = 0;
2203 	enum i915_pipe pipe;
2204 	u32 aux_mask = GEN8_AUX_CHANNEL_A;
2205 
2206 	if (!intel_irqs_enabled(dev_priv))
2207 		return IRQ_NONE;
2208 
2209 	if (INTEL_INFO(dev_priv)->gen >= 9)
2210 		aux_mask |=  GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2211 			GEN9_AUX_CHANNEL_D;
2212 
2213 	master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2214 	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2215 	if (!master_ctl)
2216 		return IRQ_NONE;
2217 
2218 	I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2219 
2220 	/* Find, clear, then process each source of interrupt */
2221 
2222 	gen8_gt_irq_handler(dev_priv, master_ctl);
2223 
2224 	if (master_ctl & GEN8_DE_MISC_IRQ) {
2225 		tmp = I915_READ(GEN8_DE_MISC_IIR);
2226 		if (tmp) {
2227 			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2228 			if (tmp & GEN8_DE_MISC_GSE)
2229 				intel_opregion_asle_intr(dev);
2230 			else
2231 				DRM_ERROR("Unexpected DE Misc interrupt\n");
2232 		}
2233 		else
2234 			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2235 	}
2236 
2237 	if (master_ctl & GEN8_DE_PORT_IRQ) {
2238 		tmp = I915_READ(GEN8_DE_PORT_IIR);
2239 		if (tmp) {
2240 			bool found = false;
2241 			u32 hotplug_trigger = 0;
2242 
2243 			if (IS_BROXTON(dev_priv))
2244 				hotplug_trigger = tmp & BXT_DE_PORT_HOTPLUG_MASK;
2245 			else if (IS_BROADWELL(dev_priv))
2246 				hotplug_trigger = tmp & GEN8_PORT_DP_A_HOTPLUG;
2247 
2248 			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2249 
2250 			if (tmp & aux_mask) {
2251 				dp_aux_irq_handler(dev);
2252 				found = true;
2253 			}
2254 
2255 			if (hotplug_trigger) {
2256 				if (IS_BROXTON(dev))
2257 					bxt_hpd_irq_handler(dev, hotplug_trigger, hpd_bxt);
2258 				else
2259 					ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_bdw);
2260 				found = true;
2261 			}
2262 
2263 			if (IS_BROXTON(dev) && (tmp & BXT_DE_PORT_GMBUS)) {
2264 				gmbus_irq_handler(dev);
2265 				found = true;
2266 			}
2267 
2268 			if (!found)
2269 				DRM_ERROR("Unexpected DE Port interrupt\n");
2270 		}
2271 		else
2272 			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2273 	}
2274 
2275 	for_each_pipe(dev_priv, pipe) {
2276 		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2277 
2278 		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2279 			continue;
2280 
2281 		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2282 		if (pipe_iir) {
2283 			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2284 
2285 			if (pipe_iir & GEN8_PIPE_VBLANK &&
2286 			    intel_pipe_handle_vblank(dev, pipe))
2287 				intel_check_page_flip(dev, pipe);
2288 
2289 			if (INTEL_INFO(dev_priv)->gen >= 9)
2290 				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2291 			else
2292 				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2293 
2294 			if (flip_done) {
2295 				intel_prepare_page_flip(dev, pipe);
2296 				intel_finish_page_flip_plane(dev, pipe);
2297 			}
2298 
2299 			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2300 				hsw_pipe_crc_irq_handler(dev, pipe);
2301 
2302 			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2303 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
2304 								    pipe);
2305 
2306 
2307 			if (INTEL_INFO(dev_priv)->gen >= 9)
2308 				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2309 			else
2310 				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2311 
2312 			if (fault_errors)
2313 				DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2314 					  pipe_name(pipe),
2315 					  pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2316 		} else
2317 			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2318 	}
2319 
2320 	if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) &&
2321 	    master_ctl & GEN8_DE_PCH_IRQ) {
2322 		/*
2323 		 * FIXME(BDW): Assume for now that the new interrupt handling
2324 		 * scheme also closed the SDE interrupt handling race we've seen
2325 		 * on older pch-split platforms. But this needs testing.
2326 		 */
2327 		u32 pch_iir = I915_READ(SDEIIR);
2328 		if (pch_iir) {
2329 			I915_WRITE(SDEIIR, pch_iir);
2330 
2331 			if (HAS_PCH_SPT(dev_priv))
2332 				spt_irq_handler(dev, pch_iir);
2333 			else
2334 				cpt_irq_handler(dev, pch_iir);
2335 		} else
2336 			DRM_ERROR("The master control interrupt lied (SDE)!\n");
2337 
2338 	}
2339 
2340 	I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2341 	POSTING_READ_FW(GEN8_MASTER_IRQ);
2342 
	return IRQ_HANDLED;
2343 }
2344 
2345 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2346 			       bool reset_completed)
2347 {
2348 	struct intel_engine_cs *ring;
2349 	int i;
2350 
2351 	/*
2352 	 * Notify all waiters for GPU completion events that reset state has
2353 	 * been changed, and that they need to restart their wait after
2354 	 * checking for potential errors (and bail out to drop locks if there is
2355 	 * a gpu reset pending so that i915_error_work_func can acquire them).
2356 	 */
2357 
2358 	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2359 	for_each_ring(ring, dev_priv, i)
2360 		wake_up_all(&ring->irq_queue);
2361 
2362 	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2363 	wake_up_all(&dev_priv->pending_flip_queue);
2364 
2365 	/*
2366 	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2367 	 * reset state is cleared.
2368 	 */
2369 	if (reset_completed)
2370 		wake_up_all(&dev_priv->gpu_error.reset_queue);
2371 }
2372 
2373 /**
2374  * i915_reset_and_wakeup - do process context error handling work
2375  * @dev: drm device
2376  *
2377  * Fire an error uevent so userspace can see that a hang or error
2378  * was detected.
2379  */
2380 static void i915_reset_and_wakeup(struct drm_device *dev)
2381 {
2382 	struct drm_i915_private *dev_priv = to_i915(dev);
2383 	struct i915_gpu_error *error = &dev_priv->gpu_error;
2384 #if 0
2385 	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2386 	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2387 	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2388 #endif
2389 	int ret;
2390 
2391 #if 0
2392 	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2393 #endif
2394 
2395 	/*
2396 	 * Note that there's only one work item which does gpu resets, so we
2397 	 * need not worry about concurrent gpu resets potentially incrementing
2398 	 * error->reset_counter twice. We only need to take care of another
2399 	 * racing irq/hangcheck declaring the gpu dead for a second time. A
2400 	 * quick check for that is good enough: schedule_work ensures the
2401 	 * correct ordering between hang detection and this work item, and since
2402 	 * the reset in-progress bit is only ever set by code outside of this
2403 	 * work we don't need to worry about any other races.
2404 	 */
2405 	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2406 		DRM_DEBUG_DRIVER("resetting chip\n");
2407 #if 0
2408 		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2409 				   reset_event);
2410 #endif
2411 
2412 		/*
2413 		 * In most cases it's guaranteed that we get here with an RPM
2414 		 * reference held, for example because there is a pending GPU
2415 		 * request that won't finish until the reset is done. This
2416 		 * isn't the case at least when we get here by doing a
2417 		 * simulated reset via debugfs, so get an RPM reference.
2418 		 */
2419 		intel_runtime_pm_get(dev_priv);
2420 
2421 		intel_prepare_reset(dev);
2422 
2423 		/*
2424 		 * All state reset _must_ be completed before we update the
2425 		 * reset counter, for otherwise waiters might miss the reset
2426 		 * pending state and not properly drop locks, resulting in
2427 		 * deadlocks with the reset work.
2428 		 */
2429 		ret = i915_reset(dev);
2430 
2431 		intel_finish_reset(dev);
2432 
2433 		intel_runtime_pm_put(dev_priv);
2434 
2435 		if (ret == 0) {
2436 			/*
2437 			 * After all the gem state is reset, increment the reset
2438 			 * counter and wake up everyone waiting for the reset to
2439 			 * complete.
2440 			 *
2441 			 * Since unlock operations are a one-sided barrier only,
2442 			 * we need to insert a barrier here to order any seqno
2443 			 * updates before
2444 			 * the counter increment.
2445 			 */
2446 			smp_mb__before_atomic();
2447 			atomic_inc(&dev_priv->gpu_error.reset_counter);
2448 
2449 #if 0
2450 			kobject_uevent_env(&dev->primary->kdev->kobj,
2451 					   KOBJ_CHANGE, reset_done_event);
2452 #endif
2453 		} else {
2454 			atomic_or(I915_WEDGED, &error->reset_counter);
2455 		}
2456 
2457 		/*
2458 		 * Note: The wake_up also serves as a memory barrier so that
2459 		 * waiters see the updated value of the reset counter atomic_t.
2460 		 */
2461 		i915_error_wake_up(dev_priv, true);
2462 	}
2463 }
2464 
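/*
 * Dump the Error Identity Register (plus instruction and page table error
 * state) to the log and write it back to clear it. Bits that remain set
 * afterwards are treated as stuck and masked via EMR.
 */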
2465 static void i915_report_and_clear_eir(struct drm_device *dev)
2466 {
2467 	struct drm_i915_private *dev_priv = dev->dev_private;
2468 	uint32_t instdone[I915_NUM_INSTDONE_REG];
2469 	u32 eir = I915_READ(EIR);
2470 	int pipe, i;
2471 
2472 	if (!eir)
2473 		return;
2474 
2475 	pr_err("render error detected, EIR: 0x%08x\n", eir);
2476 
2477 #if 0
2478 	i915_get_extra_instdone(dev, instdone);
2479 #endif
2480 
2481 	if (IS_G4X(dev)) {
2482 		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2483 			u32 ipeir = I915_READ(IPEIR_I965);
2484 
2485 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2486 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2487 			for (i = 0; i < ARRAY_SIZE(instdone); i++)
2488 				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2489 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2490 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2491 			I915_WRITE(IPEIR_I965, ipeir);
2492 			POSTING_READ(IPEIR_I965);
2493 		}
2494 		if (eir & GM45_ERROR_PAGE_TABLE) {
2495 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2496 			pr_err("page table error\n");
2497 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2498 			I915_WRITE(PGTBL_ER, pgtbl_err);
2499 			POSTING_READ(PGTBL_ER);
2500 		}
2501 	}
2502 
2503 	if (!IS_GEN2(dev)) {
2504 		if (eir & I915_ERROR_PAGE_TABLE) {
2505 			u32 pgtbl_err = I915_READ(PGTBL_ER);
2506 			pr_err("page table error\n");
2507 			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
2508 			I915_WRITE(PGTBL_ER, pgtbl_err);
2509 			POSTING_READ(PGTBL_ER);
2510 		}
2511 	}
2512 
2513 	if (eir & I915_ERROR_MEMORY_REFRESH) {
2514 		pr_err("memory refresh error:\n");
2515 		for_each_pipe(dev_priv, pipe)
2516 			pr_err("pipe %c stat: 0x%08x\n",
2517 			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2518 		/* pipestat has already been acked */
2519 	}
2520 	if (eir & I915_ERROR_INSTRUCTION) {
2521 		pr_err("instruction error\n");
2522 		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
2523 		for (i = 0; i < ARRAY_SIZE(instdone); i++)
2524 			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2525 		if (INTEL_INFO(dev)->gen < 4) {
2526 			u32 ipeir = I915_READ(IPEIR);
2527 
2528 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
2529 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
2530 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
2531 			I915_WRITE(IPEIR, ipeir);
2532 			POSTING_READ(IPEIR);
2533 		} else {
2534 			u32 ipeir = I915_READ(IPEIR_I965);
2535 
2536 			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2537 			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2538 			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
2539 			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2540 			I915_WRITE(IPEIR_I965, ipeir);
2541 			POSTING_READ(IPEIR_I965);
2542 		}
2543 	}
2544 
2545 	I915_WRITE(EIR, eir);
2546 	POSTING_READ(EIR);
2547 	eir = I915_READ(EIR);
2548 	if (eir) {
2549 		/*
2550 		 * some errors might have become stuck,
2551 		 * mask them.
2552 		 */
2553 		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2554 		I915_WRITE(EMR, I915_READ(EMR) | eir);
2555 		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2556 	}
2557 }
2558 
2559 /**
2560  * i915_handle_error - handle a gpu error
2561  * @dev: drm device
2562  *
2563  * Do some basic checking of register state at error time and
2564  * dump it to the syslog.  Also call i915_capture_error_state() to make
2565  * sure we get a record and make it available in debugfs.  Fire a uevent
2566  * so userspace knows something bad happened (should trigger collection
2567  * of a ring dump etc.).
2568  */
2569 void i915_handle_error(struct drm_device *dev, bool wedged,
2570 		       const char *fmt, ...)
2571 {
2572 	struct drm_i915_private *dev_priv = dev->dev_private;
2573 #if 0
2574 	va_list args;
2575 	char error_msg[80];
2576 
2577 	va_start(args, fmt);
2578 	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2579 	va_end(args);
2580 
2581 	i915_capture_error_state(dev, wedged, error_msg);
2582 #endif
2583 	i915_report_and_clear_eir(dev);
2584 
2585 	if (wedged) {
2586 		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2587 				&dev_priv->gpu_error.reset_counter);
2588 
2589 		/*
2590 		 * Wakeup waiting processes so that the reset function
2591 		 * i915_reset_and_wakeup doesn't deadlock trying to grab
2592 		 * various locks. By bumping the reset counter first, the woken
2593 		 * processes will see a reset in progress and back off,
2594 		 * releasing their locks and then wait for the reset completion.
2595 		 * We must do this for _all_ gpu waiters that might hold locks
2596 		 * that the reset work needs to acquire.
2597 		 *
2598 		 * Note: The wake_up serves as the required memory barrier to
2599 		 * ensure that the waiters see the updated value of the reset
2600 		 * counter atomic_t.
2601 		 */
2602 		i915_error_wake_up(dev_priv, false);
2603 	}
2604 
2605 	i915_reset_and_wakeup(dev);
2606 }
2607 
2608 /* Called from drm generic code, passed 'crtc' which
2609  * we use as a pipe index
2610  */
2611 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2612 {
2613 	struct drm_i915_private *dev_priv = dev->dev_private;
2614 	unsigned long irqflags;
2615 
2616 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2617 	if (INTEL_INFO(dev)->gen >= 4)
2618 		i915_enable_pipestat(dev_priv, pipe,
2619 				     PIPE_START_VBLANK_INTERRUPT_STATUS);
2620 	else
2621 		i915_enable_pipestat(dev_priv, pipe,
2622 				     PIPE_VBLANK_INTERRUPT_STATUS);
2623 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2624 
2625 	return 0;
2626 }
2627 
2628 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2629 {
2630 	struct drm_i915_private *dev_priv = dev->dev_private;
2631 	unsigned long irqflags;
2632 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2633 						     DE_PIPE_VBLANK(pipe);
2634 
2635 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2636 	ironlake_enable_display_irq(dev_priv, bit);
2637 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2638 
2639 	return 0;
2640 }
2641 
2642 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2643 {
2644 	struct drm_i915_private *dev_priv = dev->dev_private;
2645 	unsigned long irqflags;
2646 
2647 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2648 	i915_enable_pipestat(dev_priv, pipe,
2649 			     PIPE_START_VBLANK_INTERRUPT_STATUS);
2650 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2651 
2652 	return 0;
2653 }
2654 
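/*
 * On gen8+ the vblank interrupt is controlled per-pipe through
 * GEN8_DE_PIPE_IMR rather than through PIPESTAT.
 */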
2655 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2656 {
2657 	struct drm_i915_private *dev_priv = dev->dev_private;
2658 	unsigned long irqflags;
2659 
2660 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2661 	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2662 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2663 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2664 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2665 	return 0;
2666 }
2667 
2668 /* Called from drm generic code, passed 'crtc' which
2669  * we use as a pipe index
2670  */
2671 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2672 {
2673 	struct drm_i915_private *dev_priv = dev->dev_private;
2674 	unsigned long irqflags;
2675 
2676 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2677 	i915_disable_pipestat(dev_priv, pipe,
2678 			      PIPE_VBLANK_INTERRUPT_STATUS |
2679 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2680 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2681 }
2682 
2683 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2684 {
2685 	struct drm_i915_private *dev_priv = dev->dev_private;
2686 	unsigned long irqflags;
2687 	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2688 						     DE_PIPE_VBLANK(pipe);
2689 
2690 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2691 	ironlake_disable_display_irq(dev_priv, bit);
2692 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2693 }
2694 
2695 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2696 {
2697 	struct drm_i915_private *dev_priv = dev->dev_private;
2698 	unsigned long irqflags;
2699 
2700 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2701 	i915_disable_pipestat(dev_priv, pipe,
2702 			      PIPE_START_VBLANK_INTERRUPT_STATUS);
2703 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2704 }
2705 
2706 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2707 {
2708 	struct drm_i915_private *dev_priv = dev->dev_private;
2709 	unsigned long irqflags;
2710 
2711 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2712 	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2713 	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2714 	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2715 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2716 }
2717 
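/*
 * A ring counts as idle when it has no requests outstanding or when the
 * hardware seqno has already passed the last submitted seqno.
 */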
2718 static bool
2719 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2720 {
2721 	return (list_empty(&ring->request_list) ||
2722 		i915_seqno_passed(seqno, ring->last_submitted_seqno));
2723 }
2724 
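/*
 * Inspect the IPEHR command header to see whether the ring is stuck on a
 * semaphore wait: gen8+ matches the semaphore-wait opcode in the top bits
 * of the header, earlier gens match the MI_SEMAPHORE_MBOX register-compare
 * encoding.
 */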
2725 static bool
2726 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2727 {
2728 	if (INTEL_INFO(dev)->gen >= 8) {
2729 		return (ipehr >> 23) == 0x1c;
2730 	} else {
2731 		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2732 		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2733 				 MI_SEMAPHORE_REGISTER);
2734 	}
2735 }
2736 
2737 static struct intel_engine_cs *
2738 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2739 {
2740 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2741 	struct intel_engine_cs *signaller;
2742 	int i;
2743 
2744 	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2745 		for_each_ring(signaller, dev_priv, i) {
2746 			if (ring == signaller)
2747 				continue;
2748 
2749 			if (offset == signaller->semaphore.signal_ggtt[ring->id])
2750 				return signaller;
2751 		}
2752 	} else {
2753 		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2754 
2755 		for_each_ring(signaller, dev_priv, i) {
2756 			if (ring == signaller)
2757 				continue;
2758 
2759 			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2760 				return signaller;
2761 		}
2762 	}
2763 
2764 	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016lx\n",
2765 		  ring->id, ipehr, offset);
2766 
2767 	return NULL;
2768 }
2769 
2770 static struct intel_engine_cs *
2771 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2772 {
2773 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2774 	u32 cmd, ipehr, head;
2775 	u64 offset = 0;
2776 	int i, backwards;
2777 
2778 	/*
2779 	 * This function does not support execlist mode - any attempt to
2780 	 * proceed further into this function will result in a kernel panic
2781 	 * when dereferencing ring->buffer, which is not set up in execlist
2782 	 * mode.
2783 	 *
2784 	 * The correct way of doing it would be to derive the currently
2785 	 * executing ring buffer from the current context, which is derived
2786 	 * from the currently running request. Unfortunately, to get the
2787 	 * current request we would have to grab the struct_mutex before doing
2788 	 * anything else, which would be ill-advised since some other thread
2789 	 * might have grabbed it already and managed to hang itself, causing
2790 	 * the hang checker to deadlock.
2791 	 *
2792 	 * Therefore, this function does not support execlist mode in its
2793 	 * current form. Just return NULL and move on.
2794 	 */
2795 	if (ring->buffer == NULL)
2796 		return NULL;
2797 
2798 	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2799 	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2800 		return NULL;
2801 
2802 	/*
2803 	 * HEAD is likely pointing to the dword after the actual command,
2804 	 * so scan backwards until we find the MBOX. But limit it to just 3
2805 	 * or 4 dwords depending on the semaphore wait command size.
2806 	 * Note that we don't care about ACTHD here since that might
2807 	 * point at a batch, and semaphores are always emitted into the
2808 	 * ringbuffer itself.
2809 	 */
2810 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
2811 	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2812 
2813 	for (i = backwards; i; --i) {
2814 		/*
2815 		 * Be paranoid and presume the hw has gone off into the wild -
2816 		 * our ring is smaller than what the hardware (and hence
2817 		 * HEAD_ADDR) allows. Also handles wrap-around.
2818 		 */
2819 		head &= ring->buffer->size - 1;
2820 
2821 		/* This here seems to blow up */
2822 		cmd = ioread32(ring->buffer->virtual_start + head);
2823 		if (cmd == ipehr)
2824 			break;
2825 
2826 		head -= 4;
2827 	}
2828 
2829 	if (!i)
2830 		return NULL;
2831 
2832 	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2833 	if (INTEL_INFO(ring->dev)->gen >= 8) {
2834 		offset = ioread32(ring->buffer->virtual_start + head + 12);
2835 		offset <<= 32;
2836 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
2837 	}
2838 	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2839 }
2840 
2841 static int semaphore_passed(struct intel_engine_cs *ring)
2842 {
2843 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
2844 	struct intel_engine_cs *signaller;
2845 	u32 seqno;
2846 
2847 	ring->hangcheck.deadlock++;
2848 
2849 	signaller = semaphore_waits_for(ring, &seqno);
2850 	if (signaller == NULL)
2851 		return -1;
2852 
2853 	/* Prevent pathological recursion due to driver bugs */
2854 	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2855 		return -1;
2856 
2857 	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2858 		return 1;
2859 
2860 	/* cursory check for an unkickable deadlock */
2861 	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2862 	    semaphore_passed(signaller) < 0)
2863 		return -1;
2864 
2865 	return 0;
2866 }
2867 
2868 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2869 {
2870 	struct intel_engine_cs *ring;
2871 	int i;
2872 
2873 	for_each_ring(ring, dev_priv, i)
2874 		ring->hangcheck.deadlock = 0;
2875 }
2876 
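/*
 * Classify a ring whose seqno has not advanced: if ACTHD is still moving
 * the ring is merely busy; if it sits on a kickable WAIT_FOR_EVENT or
 * semaphore we poke the ring and report a kick; otherwise it is hung.
 */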
2877 static enum intel_ring_hangcheck_action
2878 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2879 {
2880 	struct drm_device *dev = ring->dev;
2881 	struct drm_i915_private *dev_priv = dev->dev_private;
2882 	u32 tmp;
2883 
2884 	if (acthd != ring->hangcheck.acthd) {
2885 		if (acthd > ring->hangcheck.max_acthd) {
2886 			ring->hangcheck.max_acthd = acthd;
2887 			return HANGCHECK_ACTIVE;
2888 		}
2889 
2890 		return HANGCHECK_ACTIVE_LOOP;
2891 	}
2892 
2893 	if (IS_GEN2(dev))
2894 		return HANGCHECK_HUNG;
2895 
2896 	/* Is the chip hanging on a WAIT_FOR_EVENT?
2897 	 * If so we can simply poke the RB_WAIT bit
2898 	 * and break the hang. This should work on
2899 	 * all but the second generation chipsets.
2900 	 */
2901 	tmp = I915_READ_CTL(ring);
2902 	if (tmp & RING_WAIT) {
2903 		i915_handle_error(dev, false,
2904 				  "Kicking stuck wait on %s",
2905 				  ring->name);
2906 		I915_WRITE_CTL(ring, tmp);
2907 		return HANGCHECK_KICK;
2908 	}
2909 
2910 	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2911 		switch (semaphore_passed(ring)) {
2912 		default:
2913 			return HANGCHECK_HUNG;
2914 		case 1:
2915 			i915_handle_error(dev, false,
2916 					  "Kicking stuck semaphore on %s",
2917 					  ring->name);
2918 			I915_WRITE_CTL(ring, tmp);
2919 			return HANGCHECK_KICK;
2920 		case 0:
2921 			return HANGCHECK_WAIT;
2922 		}
2923 	}
2924 
2925 	return HANGCHECK_HUNG;
2926 }
2927 
2928 /*
2929  * This is called when the chip hasn't reported back with completed
2930  * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2931  * if there is no progress, the hangcheck score for that ring is increased.
2932  * Further, acthd is inspected to see if the ring is stuck. If it is, we
2933  * kick the ring. If we see no progress on three subsequent calls
2934  * we assume the chip is wedged and try to fix it by resetting the chip.
2935  */
2936 static void i915_hangcheck_elapsed(struct work_struct *work)
2937 {
2938 	struct drm_i915_private *dev_priv =
2939 		container_of(work, typeof(*dev_priv),
2940 			     gpu_error.hangcheck_work.work);
2941 	struct drm_device *dev = dev_priv->dev;
2942 	struct intel_engine_cs *ring;
2943 	int i;
2944 	int busy_count = 0, rings_hung = 0;
2945 	bool stuck[I915_NUM_RINGS] = { 0 };
2946 #define BUSY 1
2947 #define KICK 5
2948 #define HUNG 20
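/*
 * Weights added to a ring's hangcheck score; once the score reaches
 * HANGCHECK_SCORE_RING_HUNG the ring is reported as hung below.
 */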
2949 
2950 	if (!i915.enable_hangcheck)
2951 		return;
2952 
2953 	for_each_ring(ring, dev_priv, i) {
2954 		u64 acthd;
2955 		u32 seqno;
2956 		bool busy = true;
2957 
2958 		semaphore_clear_deadlocks(dev_priv);
2959 
2960 		seqno = ring->get_seqno(ring, false);
2961 		acthd = intel_ring_get_active_head(ring);
2962 
2963 		if (ring->hangcheck.seqno == seqno) {
2964 			if (ring_idle(ring, seqno)) {
2965 				ring->hangcheck.action = HANGCHECK_IDLE;
2966 
2967 				if (waitqueue_active(&ring->irq_queue)) {
2968 					/* Issue a wake-up to catch stuck h/w. */
2969 					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2970 						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2971 							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2972 								  ring->name);
2973 						else
2974 							DRM_INFO("Fake missed irq on %s\n",
2975 								 ring->name);
2976 						wake_up_all(&ring->irq_queue);
2977 					}
2978 					/* Safeguard against driver failure */
2979 					ring->hangcheck.score += BUSY;
2980 				} else
2981 					busy = false;
2982 			} else {
2983 				/* We always increment the hangcheck score
2984 				 * if the ring is busy and still processing
2985 				 * the same request, so that no single request
2986 				 * can run indefinitely (such as a chain of
2987 				 * batches). The only time we do not increment
2988 				 * the hangcheck score on this ring is if this
2989 				 * ring is in a legitimate wait for another
2990 				 * ring. In that case the waiting ring is a
2991 				 * victim and we want to be sure we catch the
2992 				 * right culprit. Then every time we do kick
2993 				 * the ring, add a small increment to the
2994 				 * score so that we can catch a batch that is
2995 				 * being repeatedly kicked and so responsible
2996 				 * for stalling the machine.
2997 				 */
2998 				ring->hangcheck.action = ring_stuck(ring,
2999 								    acthd);
3000 
3001 				switch (ring->hangcheck.action) {
3002 				case HANGCHECK_IDLE:
3003 				case HANGCHECK_WAIT:
3004 				case HANGCHECK_ACTIVE:
3005 					break;
3006 				case HANGCHECK_ACTIVE_LOOP:
3007 					ring->hangcheck.score += BUSY;
3008 					break;
3009 				case HANGCHECK_KICK:
3010 					ring->hangcheck.score += KICK;
3011 					break;
3012 				case HANGCHECK_HUNG:
3013 					ring->hangcheck.score += HUNG;
3014 					stuck[i] = true;
3015 					break;
3016 				}
3017 			}
3018 		} else {
3019 			ring->hangcheck.action = HANGCHECK_ACTIVE;
3020 
3021 			/* Gradually reduce the count so that we catch DoS
3022 			 * attempts across multiple batches.
3023 			 */
3024 			if (ring->hangcheck.score > 0)
3025 				ring->hangcheck.score--;
3026 
3027 			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3028 		}
3029 
3030 		ring->hangcheck.seqno = seqno;
3031 		ring->hangcheck.acthd = acthd;
3032 		busy_count += busy;
3033 	}
3034 
3035 	for_each_ring(ring, dev_priv, i) {
3036 		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3037 			DRM_INFO("%s on %s\n",
3038 				 stuck[i] ? "stuck" : "no progress",
3039 				 ring->name);
3040 			rings_hung++;
3041 		}
3042 	}
3043 
3044 	if (rings_hung)
3045 		return i915_handle_error(dev, true, "Ring hung");
3046 
3047 	if (busy_count)
3048 		/* Reset the timer in case the chip hangs without another request
3049 		 * being added */
3050 		i915_queue_hangcheck(dev);
3051 }
3052 
3053 void i915_queue_hangcheck(struct drm_device *dev)
3054 {
3055 	struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3056 
3057 	if (!i915.enable_hangcheck)
3058 		return;
3059 
3060 	/* Don't continually defer the hangcheck so that it is always run at
3061 	 * least once after work has been scheduled on any ring. Otherwise,
3062 	 * we will ignore a hung ring if a second ring is kept busy.
3063 	 */
3064 
3065 	queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3066 			   round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3067 }
3068 
3069 static void ibx_irq_reset(struct drm_device *dev)
3070 {
3071 	struct drm_i915_private *dev_priv = dev->dev_private;
3072 
3073 	if (HAS_PCH_NOP(dev))
3074 		return;
3075 
3076 	GEN5_IRQ_RESET(SDE);
3077 
3078 	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3079 		I915_WRITE(SERR_INT, 0xffffffff);
3080 }
3081 
3082 /*
3083  * SDEIER is also touched by the interrupt handler to work around missed PCH
3084  * interrupts. Hence we can't update it after the interrupt handler is enabled -
3085  * instead we unconditionally enable all PCH interrupt sources here, but then
3086  * only unmask them as needed with SDEIMR.
3087  *
3088  * This function needs to be called before interrupts are enabled.
3089  */
3090 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3091 {
3092 	struct drm_i915_private *dev_priv = dev->dev_private;
3093 
3094 	if (HAS_PCH_NOP(dev))
3095 		return;
3096 
3097 	WARN_ON(I915_READ(SDEIER) != 0);
3098 	I915_WRITE(SDEIER, 0xffffffff);
3099 	POSTING_READ(SDEIER);
3100 }
3101 
3102 static void gen5_gt_irq_reset(struct drm_device *dev)
3103 {
3104 	struct drm_i915_private *dev_priv = dev->dev_private;
3105 
3106 	GEN5_IRQ_RESET(GT);
3107 	if (INTEL_INFO(dev)->gen >= 6)
3108 		GEN5_IRQ_RESET(GEN6_PM);
3109 }
3110 
3111 /* drm_dma.h hooks */
3113 static void ironlake_irq_reset(struct drm_device *dev)
3114 {
3115 	struct drm_i915_private *dev_priv = dev->dev_private;
3116 
3117 	I915_WRITE(HWSTAM, 0xffffffff);
3118 
3119 	GEN5_IRQ_RESET(DE);
3120 	if (IS_GEN7(dev))
3121 		I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3122 
3123 	gen5_gt_irq_reset(dev);
3124 
3125 	ibx_irq_reset(dev);
3126 }
3127 
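/*
 * Quiesce the VLV/CHV display interrupt block: clear all hotplug enables,
 * ack PORT_HOTPLUG_STAT and every PIPESTAT register, then reset the
 * VLV_IMR/IER/IIR triplet.
 */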
3128 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3129 {
3130 	enum i915_pipe pipe;
3131 
3132 	i915_hotplug_interrupt_update(dev_priv, 0xFFFFFFFF, 0);
3133 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3134 
3135 	for_each_pipe(dev_priv, pipe)
3136 		I915_WRITE(PIPESTAT(pipe), 0xffff);
3137 
3138 	GEN5_IRQ_RESET(VLV_);
3139 }
3140 
3141 static void valleyview_irq_preinstall(struct drm_device *dev)
3142 {
3143 	struct drm_i915_private *dev_priv = dev->dev_private;
3144 
3145 	/* VLV magic */
3146 	I915_WRITE(VLV_IMR, 0);
3147 	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3148 	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3149 	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3150 
3151 	gen5_gt_irq_reset(dev);
3152 
3153 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3154 
3155 	vlv_display_irq_reset(dev_priv);
3156 }
3157 
3158 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3159 {
3160 	GEN8_IRQ_RESET_NDX(GT, 0);
3161 	GEN8_IRQ_RESET_NDX(GT, 1);
3162 	GEN8_IRQ_RESET_NDX(GT, 2);
3163 	GEN8_IRQ_RESET_NDX(GT, 3);
3164 }
3165 
3166 static void gen8_irq_reset(struct drm_device *dev)
3167 {
3168 	struct drm_i915_private *dev_priv = dev->dev_private;
3169 	int pipe;
3170 
3171 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3172 	POSTING_READ(GEN8_MASTER_IRQ);
3173 
3174 	gen8_gt_irq_reset(dev_priv);
3175 
3176 	for_each_pipe(dev_priv, pipe)
3177 		if (intel_display_power_is_enabled(dev_priv,
3178 						   POWER_DOMAIN_PIPE(pipe)))
3179 			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3180 
3181 	GEN5_IRQ_RESET(GEN8_DE_PORT_);
3182 	GEN5_IRQ_RESET(GEN8_DE_MISC_);
3183 	GEN5_IRQ_RESET(GEN8_PCU_);
3184 
3185 	if (HAS_PCH_SPLIT(dev))
3186 		ibx_irq_reset(dev);
3187 }
3188 
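/*
 * Re-program the DE pipe interrupt registers for the pipes whose power well
 * has just been enabled, since those registers do not retain their contents
 * while the well is powered down.
 */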
3189 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3190 				     unsigned int pipe_mask)
3191 {
3192 	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3193 
3194 	spin_lock_irq(&dev_priv->irq_lock);
3195 	if (pipe_mask & 1 << PIPE_A)
3196 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3197 				  dev_priv->de_irq_mask[PIPE_A],
3198 				  ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3199 	if (pipe_mask & 1 << PIPE_B)
3200 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3201 				  dev_priv->de_irq_mask[PIPE_B],
3202 				  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3203 	if (pipe_mask & 1 << PIPE_C)
3204 		GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3205 				  dev_priv->de_irq_mask[PIPE_C],
3206 				  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3207 	spin_unlock_irq(&dev_priv->irq_lock);
3208 }
3209 
3210 static void cherryview_irq_preinstall(struct drm_device *dev)
3211 {
3212 	struct drm_i915_private *dev_priv = dev->dev_private;
3213 
3214 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3215 	POSTING_READ(GEN8_MASTER_IRQ);
3216 
3217 	gen8_gt_irq_reset(dev_priv);
3218 
3219 	GEN5_IRQ_RESET(GEN8_PCU_);
3220 
3221 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3222 
3223 	vlv_display_irq_reset(dev_priv);
3224 }
3225 
3226 static u32 intel_hpd_enabled_irqs(struct drm_device *dev,
3227 				  const u32 hpd[HPD_NUM_PINS])
3228 {
3229 	struct drm_i915_private *dev_priv = to_i915(dev);
3230 	struct intel_encoder *encoder;
3231 	u32 enabled_irqs = 0;
3232 
3233 	for_each_intel_encoder(dev, encoder)
3234 		if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3235 			enabled_irqs |= hpd[encoder->hpd_pin];
3236 
3237 	return enabled_irqs;
3238 }
3239 
3240 static void ibx_hpd_irq_setup(struct drm_device *dev)
3241 {
3242 	struct drm_i915_private *dev_priv = dev->dev_private;
3243 	u32 hotplug_irqs, hotplug, enabled_irqs;
3244 
3245 	if (HAS_PCH_IBX(dev)) {
3246 		hotplug_irqs = SDE_HOTPLUG_MASK;
3247 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx);
3248 	} else {
3249 		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3250 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt);
3251 	}
3252 
3253 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3254 
3255 	/*
3256 	 * Enable digital hotplug on the PCH, and configure the DP short pulse
3257 	 * duration to 2ms (which is the minimum in the Display Port spec).
3258 	 * The pulse duration bits are reserved on LPT+.
3259 	 */
3260 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3261 	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3262 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3263 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3264 	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3265 	/*
3266 	 * When CPU and PCH are on the same package, port A
3267 	 * HPD must be enabled in both north and south.
3268 	 */
3269 	if (HAS_PCH_LPT_LP(dev))
3270 		hotplug |= PORTA_HOTPLUG_ENABLE;
3271 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3272 }
3273 
3274 static void spt_hpd_irq_setup(struct drm_device *dev)
3275 {
3276 	struct drm_i915_private *dev_priv = dev->dev_private;
3277 	u32 hotplug_irqs, hotplug, enabled_irqs;
3278 
3279 	hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3280 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt);
3281 
3282 	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3283 
3284 	/* Enable digital hotplug on the PCH */
3285 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3286 	hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3287 		PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3288 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3289 
3290 	hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3291 	hotplug |= PORTE_HOTPLUG_ENABLE;
3292 	I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3293 }
3294 
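/*
 * Enable the CPU (north display) port A hotplug interrupt, picking the
 * right bit for BDW+, IVB/HSW or ILK/SNB, program the pulse duration, and
 * then chain into ibx_hpd_irq_setup() for the PCH ports.
 */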
3295 static void ilk_hpd_irq_setup(struct drm_device *dev)
3296 {
3297 	struct drm_i915_private *dev_priv = dev->dev_private;
3298 	u32 hotplug_irqs, hotplug, enabled_irqs;
3299 
3300 	if (INTEL_INFO(dev)->gen >= 8) {
3301 		hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3302 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw);
3303 
3304 		bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3305 	} else if (INTEL_INFO(dev)->gen >= 7) {
3306 		hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3307 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb);
3308 
3309 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3310 	} else {
3311 		hotplug_irqs = DE_DP_A_HOTPLUG;
3312 		enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk);
3313 
3314 		ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3315 	}
3316 
3317 	/*
3318 	 * Enable digital hotplug on the CPU, and configure the DP short pulse
3319 	 * duration to 2ms (which is the minimum in the Display Port spec).
3320 	 * The pulse duration bits are reserved on HSW+.
3321 	 */
3322 	hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3323 	hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3324 	hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3325 	I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3326 
3327 	ibx_hpd_irq_setup(dev);
3328 }
3329 
3330 static void bxt_hpd_irq_setup(struct drm_device *dev)
3331 {
3332 	struct drm_i915_private *dev_priv = dev->dev_private;
3333 	u32 hotplug_irqs, hotplug, enabled_irqs;
3334 
3335 	enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt);
3336 	hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3337 
3338 	bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3339 
3340 	hotplug = I915_READ(PCH_PORT_HOTPLUG);
3341 	hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3342 		PORTA_HOTPLUG_ENABLE;
3343 	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3344 }
3345 
3346 static void ibx_irq_postinstall(struct drm_device *dev)
3347 {
3348 	struct drm_i915_private *dev_priv = dev->dev_private;
3349 	u32 mask;
3350 
3351 	if (HAS_PCH_NOP(dev))
3352 		return;
3353 
3354 	if (HAS_PCH_IBX(dev))
3355 		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3356 	else
3357 		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3358 
3359 	gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3360 	I915_WRITE(SDEIMR, ~mask);
3361 }
3362 
3363 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3364 {
3365 	struct drm_i915_private *dev_priv = dev->dev_private;
3366 	u32 pm_irqs, gt_irqs;
3367 
3368 	pm_irqs = gt_irqs = 0;
3369 
3370 	dev_priv->gt_irq_mask = ~0;
3371 	if (HAS_L3_DPF(dev)) {
3372 		/* L3 parity interrupt is always unmasked. */
3373 		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3374 		gt_irqs |= GT_PARITY_ERROR(dev);
3375 	}
3376 
3377 	gt_irqs |= GT_RENDER_USER_INTERRUPT;
3378 	if (IS_GEN5(dev)) {
3379 		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3380 			   ILK_BSD_USER_INTERRUPT;
3381 	} else {
3382 		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3383 	}
3384 
3385 	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3386 
3387 	if (INTEL_INFO(dev)->gen >= 6) {
3388 		/*
3389 		 * RPS interrupts will get enabled/disabled on demand when RPS
3390 		 * itself is enabled/disabled.
3391 		 */
3392 		if (HAS_VEBOX(dev))
3393 			pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3394 
3395 		dev_priv->pm_irq_mask = 0xffffffff;
3396 		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3397 	}
3398 }
3399 
3400 static int ironlake_irq_postinstall(struct drm_device *dev)
3401 {
3402 	struct drm_i915_private *dev_priv = dev->dev_private;
3403 	u32 display_mask, extra_mask;
3404 
3405 	if (INTEL_INFO(dev)->gen >= 7) {
3406 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3407 				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3408 				DE_PLANEB_FLIP_DONE_IVB |
3409 				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3410 		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3411 			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3412 			      DE_DP_A_HOTPLUG_IVB);
3413 	} else {
3414 		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3415 				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3416 				DE_AUX_CHANNEL_A |
3417 				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3418 				DE_POISON);
3419 		extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3420 			      DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3421 			      DE_DP_A_HOTPLUG);
3422 	}
3423 
3424 	dev_priv->irq_mask = ~display_mask;
3425 
3426 	I915_WRITE(HWSTAM, 0xeffe);
3427 
3428 	ibx_irq_pre_postinstall(dev);
3429 
3430 	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3431 
3432 	gen5_gt_irq_postinstall(dev);
3433 
3434 	ibx_irq_postinstall(dev);
3435 
3436 	if (IS_IRONLAKE_M(dev)) {
3437 		/* Enable PCU event interrupts
3438 		 *
3439 		 * spinlocking not required here for correctness since interrupt
3440 		 * setup is guaranteed to run in single-threaded context. But we
3441 		 * need it to make the assert_spin_locked happy. */
3442 		spin_lock_irq(&dev_priv->irq_lock);
3443 		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3444 		spin_unlock_irq(&dev_priv->irq_lock);
3445 	}
3446 
3447 	return 0;
3448 }
3449 
3450 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3451 {
3452 	u32 pipestat_mask;
3453 	u32 iir_mask;
3454 	enum i915_pipe pipe;
3455 
3456 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3457 			PIPE_FIFO_UNDERRUN_STATUS;
3458 
3459 	for_each_pipe(dev_priv, pipe)
3460 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3461 	POSTING_READ(PIPESTAT(PIPE_A));
3462 
3463 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3464 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3465 
3466 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3467 	for_each_pipe(dev_priv, pipe)
3468 		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3469 
3470 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3471 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3472 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3473 	if (IS_CHERRYVIEW(dev_priv))
3474 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3475 	dev_priv->irq_mask &= ~iir_mask;
3476 
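	/*
	 * VLV_IIR is cleared twice before IER/IMR are touched; this
	 * double-clear pattern is used throughout the VLV/CHV paths,
	 * presumably to flush status bits that re-latch while the first
	 * clear is still in flight.
	 */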
3477 	I915_WRITE(VLV_IIR, iir_mask);
3478 	I915_WRITE(VLV_IIR, iir_mask);
3479 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3480 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3481 	POSTING_READ(VLV_IMR);
3482 }
3483 
3484 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3485 {
3486 	u32 pipestat_mask;
3487 	u32 iir_mask;
3488 	enum i915_pipe pipe;
3489 
3490 	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3491 		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3492 		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3493 	if (IS_CHERRYVIEW(dev_priv))
3494 		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3495 
3496 	dev_priv->irq_mask |= iir_mask;
3497 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3498 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3499 	I915_WRITE(VLV_IIR, iir_mask);
3500 	I915_WRITE(VLV_IIR, iir_mask);
3501 	POSTING_READ(VLV_IIR);
3502 
3503 	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3504 			PIPE_CRC_DONE_INTERRUPT_STATUS;
3505 
3506 	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3507 	for_each_pipe(dev_priv, pipe)
3508 		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3509 
3510 	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3511 			PIPE_FIFO_UNDERRUN_STATUS;
3512 
3513 	for_each_pipe(dev_priv, pipe)
3514 		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3515 	POSTING_READ(PIPESTAT(PIPE_A));
3516 }
3517 
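/*
 * The two helpers below are called with irq_lock held.  They track the desired
 * state in dev_priv->display_irqs_enabled and, if driver interrupts are
 * currently installed (intel_irqs_enabled()), program the hardware immediately
 * through the install/uninstall helpers above; otherwise the postinstall path
 * applies the recorded state later.
 */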
3518 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3519 {
3520 	assert_spin_locked(&dev_priv->irq_lock);
3521 
3522 	if (dev_priv->display_irqs_enabled)
3523 		return;
3524 
3525 	dev_priv->display_irqs_enabled = true;
3526 
3527 	if (intel_irqs_enabled(dev_priv))
3528 		valleyview_display_irqs_install(dev_priv);
3529 }
3530 
3531 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3532 {
3533 	assert_spin_locked(&dev_priv->irq_lock);
3534 
3535 	if (!dev_priv->display_irqs_enabled)
3536 		return;
3537 
3538 	dev_priv->display_irqs_enabled = false;
3539 
3540 	if (intel_irqs_enabled(dev_priv))
3541 		valleyview_display_irqs_uninstall(dev_priv);
3542 }
3543 
3544 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3545 {
3546 	dev_priv->irq_mask = ~0;
3547 
3548 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3549 	POSTING_READ(PORT_HOTPLUG_EN);
3550 
3551 	I915_WRITE(VLV_IIR, 0xffffffff);
3552 	I915_WRITE(VLV_IIR, 0xffffffff);
3553 	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3554 	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3555 	POSTING_READ(VLV_IMR);
3556 
3557 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3558 	 * just to make the assert_spin_locked check happy. */
3559 	spin_lock_irq(&dev_priv->irq_lock);
3560 	if (dev_priv->display_irqs_enabled)
3561 		valleyview_display_irqs_install(dev_priv);
3562 	spin_unlock_irq(&dev_priv->irq_lock);
3563 }
3564 
3565 static int valleyview_irq_postinstall(struct drm_device *dev)
3566 {
3567 	struct drm_i915_private *dev_priv = dev->dev_private;
3568 
3569 	vlv_display_irq_postinstall(dev_priv);
3570 
3571 	gen5_gt_irq_postinstall(dev);
3572 
3573 	/* ack & enable invalid PTE error interrupts */
3574 #if 0 /* FIXME: add support to irq handler for checking these bits */
3575 	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3576 	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3577 #endif
3578 
3579 	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3580 
3581 	return 0;
3582 }
3583 
3584 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3585 {
3586 	/* These are interrupts we'll toggle with the ring mask register */
3587 	uint32_t gt_interrupts[] = {
3588 		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3589 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3590 			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3591 			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3592 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3593 		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3594 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3595 			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3596 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3597 		0,
3598 		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3599 			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3600 		};
3601 
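	/*
	 * Each gt_interrupts[] entry programs one GT interrupt bank:
	 * 0 = render/blitter, 1 = VCS1/VCS2, 2 = PM/RPS (left empty here,
	 * see below), 3 = VEBOX.
	 */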
3602 	dev_priv->pm_irq_mask = 0xffffffff;
3603 	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3604 	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3605 	/*
3606 	 * RPS interrupts will get enabled/disabled on demand when RPS itself
3607 	 * is enabled/disabled.
3608 	 */
3609 	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3610 	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3611 }
3612 
3613 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3614 {
3615 	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3616 	uint32_t de_pipe_enables;
3617 	u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3618 	u32 de_port_enables;
3619 	enum i915_pipe pipe;
3620 
3621 	if (INTEL_INFO(dev_priv)->gen >= 9) {
3622 		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3623 				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3624 		de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3625 				  GEN9_AUX_CHANNEL_D;
3626 		if (IS_BROXTON(dev_priv))
3627 			de_port_masked |= BXT_DE_PORT_GMBUS;
3628 	} else {
3629 		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3630 				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3631 	}
3632 
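	/*
	 * As on the older gens, the *_masked bits are unmasked in IMR right
	 * away, while vblank and FIFO underrun are only enabled in IER and
	 * remain masked until they are requested.
	 */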
3633 	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3634 					   GEN8_PIPE_FIFO_UNDERRUN;
3635 
3636 	de_port_enables = de_port_masked;
3637 	if (IS_BROXTON(dev_priv))
3638 		de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3639 	else if (IS_BROADWELL(dev_priv))
3640 		de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3641 
3642 	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3643 	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3644 	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3645 
3646 	for_each_pipe(dev_priv, pipe)
3647 		if (intel_display_power_is_enabled(dev_priv,
3648 				POWER_DOMAIN_PIPE(pipe)))
3649 			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3650 					  dev_priv->de_irq_mask[pipe],
3651 					  de_pipe_enables);
3652 
3653 	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3654 }
3655 
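/*
 * Note the ordering in gen8_irq_postinstall(): all subordinate GT/DE/PCH IMR
 * and IER registers are programmed first, and the master interrupt control bit
 * is written last with a posting read, so nothing can be delivered through a
 * half-initialized setup.
 */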
3656 static int gen8_irq_postinstall(struct drm_device *dev)
3657 {
3658 	struct drm_i915_private *dev_priv = dev->dev_private;
3659 
3660 	if (HAS_PCH_SPLIT(dev))
3661 		ibx_irq_pre_postinstall(dev);
3662 
3663 	gen8_gt_irq_postinstall(dev_priv);
3664 	gen8_de_irq_postinstall(dev_priv);
3665 
3666 	if (HAS_PCH_SPLIT(dev))
3667 		ibx_irq_postinstall(dev);
3668 
3669 	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3670 	POSTING_READ(GEN8_MASTER_IRQ);
3671 
3672 	return 0;
3673 }
3674 
3675 static int cherryview_irq_postinstall(struct drm_device *dev)
3676 {
3677 	struct drm_i915_private *dev_priv = dev->dev_private;
3678 
3679 	vlv_display_irq_postinstall(dev_priv);
3680 
3681 	gen8_gt_irq_postinstall(dev_priv);
3682 
3683 	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3684 	POSTING_READ(GEN8_MASTER_IRQ);
3685 
3686 	return 0;
3687 }
3688 
3689 static void gen8_irq_uninstall(struct drm_device *dev)
3690 {
3691 	struct drm_i915_private *dev_priv = dev->dev_private;
3692 
3693 	if (!dev_priv)
3694 		return;
3695 
3696 	gen8_irq_reset(dev);
3697 }
3698 
3699 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3700 {
3701 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3702 	 * just to make the assert_spin_locked check happy. */
3703 	spin_lock_irq(&dev_priv->irq_lock);
3704 	if (dev_priv->display_irqs_enabled)
3705 		valleyview_display_irqs_uninstall(dev_priv);
3706 	spin_unlock_irq(&dev_priv->irq_lock);
3707 
3708 	vlv_display_irq_reset(dev_priv);
3709 
3710 	dev_priv->irq_mask = ~0;
3711 }
3712 
3713 static void valleyview_irq_uninstall(struct drm_device *dev)
3714 {
3715 	struct drm_i915_private *dev_priv = dev->dev_private;
3716 
3717 	if (!dev_priv)
3718 		return;
3719 
3720 	I915_WRITE(VLV_MASTER_IER, 0);
3721 
3722 	gen5_gt_irq_reset(dev);
3723 
3724 	I915_WRITE(HWSTAM, 0xffffffff);
3725 
3726 	vlv_display_irq_uninstall(dev_priv);
3727 }
3728 
3729 static void cherryview_irq_uninstall(struct drm_device *dev)
3730 {
3731 	struct drm_i915_private *dev_priv = dev->dev_private;
3732 
3733 	if (!dev_priv)
3734 		return;
3735 
3736 	I915_WRITE(GEN8_MASTER_IRQ, 0);
3737 	POSTING_READ(GEN8_MASTER_IRQ);
3738 
3739 	gen8_gt_irq_reset(dev_priv);
3740 
3741 	GEN5_IRQ_RESET(GEN8_PCU_);
3742 
3743 	vlv_display_irq_uninstall(dev_priv);
3744 }
3745 
3746 static void ironlake_irq_uninstall(struct drm_device *dev)
3747 {
3748 	struct drm_i915_private *dev_priv = dev->dev_private;
3749 
3750 	if (!dev_priv)
3751 		return;
3752 
3753 	ironlake_irq_reset(dev);
3754 }
3755 
3756 static void i8xx_irq_preinstall(struct drm_device * dev)
3757 {
3758 	struct drm_i915_private *dev_priv = dev->dev_private;
3759 	int pipe;
3760 
3761 	for_each_pipe(dev_priv, pipe)
3762 		I915_WRITE(PIPESTAT(pipe), 0);
3763 	I915_WRITE16(IMR, 0xffff);
3764 	I915_WRITE16(IER, 0x0);
3765 	POSTING_READ16(IER);
3766 }
3767 
3768 static int i8xx_irq_postinstall(struct drm_device *dev)
3769 {
3770 	struct drm_i915_private *dev_priv = dev->dev_private;
3771 
3772 	I915_WRITE16(EMR,
3773 		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3774 
3775 	/* Unmask the interrupts that we always want on. */
3776 	dev_priv->irq_mask =
3777 		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3778 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3779 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3780 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3781 	I915_WRITE16(IMR, dev_priv->irq_mask);
3782 
3783 	I915_WRITE16(IER,
3784 		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3785 		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3786 		     I915_USER_INTERRUPT);
3787 	POSTING_READ16(IER);
3788 
3789 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3790 	 * just to make the assert_spin_locked check happy. */
3791 	spin_lock_irq(&dev_priv->irq_lock);
3792 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3793 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3794 	spin_unlock_irq(&dev_priv->irq_lock);
3795 
3796 	return 0;
3797 }
3798 
3799 /*
3800  * Returns true when a page flip has completed.
3801  */
3802 static bool i8xx_handle_vblank(struct drm_device *dev,
3803 			       int plane, int pipe, u32 iir)
3804 {
3805 	struct drm_i915_private *dev_priv = dev->dev_private;
3806 	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3807 
3808 	if (!intel_pipe_handle_vblank(dev, pipe))
3809 		return false;
3810 
3811 	if ((iir & flip_pending) == 0)
3812 		goto check_page_flip;
3813 
3814 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3815 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3816 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3817 	 * the flip is completed (no longer pending). Since this doesn't raise
3818 	 * an interrupt per se, we watch for the change at vblank.
3819 	 */
3820 	if (I915_READ16(ISR) & flip_pending)
3821 		goto check_page_flip;
3822 
3823 	intel_prepare_page_flip(dev, plane);
3824 	intel_finish_page_flip(dev, pipe);
3825 	return true;
3826 
3827 check_page_flip:
3828 	intel_check_page_flip(dev, pipe);
3829 	return false;
3830 }
3831 
3832 static irqreturn_t i8xx_irq_handler(void *arg)
3833 {
3834 	struct drm_device *dev = arg;
3835 	struct drm_i915_private *dev_priv = dev->dev_private;
3836 	u16 iir, new_iir;
3837 	u32 pipe_stats[2];
3838 	int pipe;
3839 	u16 flip_mask =
3840 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3841 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3842 
3843 	if (!intel_irqs_enabled(dev_priv))
3844 		return IRQ_NONE;
3845 
3846 	iir = I915_READ16(IIR);
3847 	if (iir == 0)
3848 		return IRQ_NONE;
3849 
3850 	while (iir & ~flip_mask) {
3851 		/* Can't rely on pipestat interrupt bit in iir as it might
3852 		 * have been cleared after the pipestat interrupt was received.
3853 		 * It doesn't set the bit in iir again, but it still produces
3854 		 * interrupts (for non-MSI).
3855 		 */
3856 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
3857 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3858 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3859 
3860 		for_each_pipe(dev_priv, pipe) {
3861 			int reg = PIPESTAT(pipe);
3862 			pipe_stats[pipe] = I915_READ(reg);
3863 
3864 			/*
3865 			 * Clear the PIPE*STAT regs before the IIR
3866 			 */
3867 			if (pipe_stats[pipe] & 0x8000ffff)
3868 				I915_WRITE(reg, pipe_stats[pipe]);
3869 		}
3870 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
3871 
3872 		I915_WRITE16(IIR, iir & ~flip_mask);
3873 		new_iir = I915_READ16(IIR); /* Flush posted writes */
3874 
3875 		if (iir & I915_USER_INTERRUPT)
3876 			notify_ring(&dev_priv->ring[RCS]);
3877 
3878 		for_each_pipe(dev_priv, pipe) {
3879 			int plane = pipe;
3880 			if (HAS_FBC(dev))
3881 				plane = !plane;
3882 
3883 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3884 			    i8xx_handle_vblank(dev, plane, pipe, iir))
3885 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3886 
3887 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3888 				i9xx_pipe_crc_irq_handler(dev, pipe);
3889 
3890 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3891 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
3892 								    pipe);
3893 		}
3894 
3895 		iir = new_iir;
3896 	}
3897 
3898 	return IRQ_HANDLED;
3899 
3900 static void i8xx_irq_uninstall(struct drm_device * dev)
3901 {
3902 	struct drm_i915_private *dev_priv = dev->dev_private;
3903 	int pipe;
3904 
3905 	for_each_pipe(dev_priv, pipe) {
3906 		/* Clear enable bits; then clear status bits */
3907 		I915_WRITE(PIPESTAT(pipe), 0);
3908 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3909 	}
3910 	I915_WRITE16(IMR, 0xffff);
3911 	I915_WRITE16(IER, 0x0);
3912 	I915_WRITE16(IIR, I915_READ16(IIR));
3913 }
3914 
3915 static void i915_irq_preinstall(struct drm_device * dev)
3916 {
3917 	struct drm_i915_private *dev_priv = dev->dev_private;
3918 	int pipe;
3919 
3920 	if (I915_HAS_HOTPLUG(dev)) {
3921 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3922 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3923 	}
3924 
3925 	I915_WRITE16(HWSTAM, 0xeffe);
3926 	for_each_pipe(dev_priv, pipe)
3927 		I915_WRITE(PIPESTAT(pipe), 0);
3928 	I915_WRITE(IMR, 0xffffffff);
3929 	I915_WRITE(IER, 0x0);
3930 	POSTING_READ(IER);
3931 }
3932 
3933 static int i915_irq_postinstall(struct drm_device *dev)
3934 {
3935 	struct drm_i915_private *dev_priv = dev->dev_private;
3936 	u32 enable_mask;
3937 
3938 	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3939 
3940 	/* Unmask the interrupts that we always want on. */
3941 	dev_priv->irq_mask =
3942 		~(I915_ASLE_INTERRUPT |
3943 		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3944 		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3945 		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3946 		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3947 
3948 	enable_mask =
3949 		I915_ASLE_INTERRUPT |
3950 		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3951 		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3952 		I915_USER_INTERRUPT;
3953 
3954 	if (I915_HAS_HOTPLUG(dev)) {
3955 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
3956 		POSTING_READ(PORT_HOTPLUG_EN);
3957 
3958 		/* Enable in IER... */
3959 		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3960 		/* and unmask in IMR */
3961 		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3962 	}
3963 
3964 	I915_WRITE(IMR, dev_priv->irq_mask);
3965 	I915_WRITE(IER, enable_mask);
3966 	POSTING_READ(IER);
3967 
3968 	i915_enable_asle_pipestat(dev);
3969 
3970 	/* Interrupt setup is already guaranteed to be single-threaded, this is
3971 	 * just to make the assert_spin_locked check happy. */
3972 	spin_lock_irq(&dev_priv->irq_lock);
3973 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3974 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3975 	spin_unlock_irq(&dev_priv->irq_lock);
3976 
3977 	return 0;
3978 }
3979 
3980 /*
3981  * Returns true when a page flip has completed.
3982  */
3983 static bool i915_handle_vblank(struct drm_device *dev,
3984 			       int plane, int pipe, u32 iir)
3985 {
3986 	struct drm_i915_private *dev_priv = dev->dev_private;
3987 	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3988 
3989 	if (!intel_pipe_handle_vblank(dev, pipe))
3990 		return false;
3991 
3992 	if ((iir & flip_pending) == 0)
3993 		goto check_page_flip;
3994 
3995 	/* We detect FlipDone by looking for the change in PendingFlip from '1'
3996 	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3997 	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3998 	 * the flip is completed (no longer pending). Since this doesn't raise
3999 	 * an interrupt per se, we watch for the change at vblank.
4000 	 */
4001 	if (I915_READ(ISR) & flip_pending)
4002 		goto check_page_flip;
4003 
4004 	intel_prepare_page_flip(dev, plane);
4005 	intel_finish_page_flip(dev, pipe);
4006 	return true;
4007 
4008 check_page_flip:
4009 	intel_check_page_flip(dev, pipe);
4010 	return false;
4011 }
4012 
4013 static irqreturn_t i915_irq_handler(void *arg)
4014 {
4015 	struct drm_device *dev = arg;
4016 	struct drm_i915_private *dev_priv = dev->dev_private;
4017 	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4018 	u32 flip_mask =
4019 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4020 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4021 	int pipe;
4022 
4023 	if (!intel_irqs_enabled(dev_priv))
4024 		return IRQ_NONE;
4025 
4026 	iir = I915_READ(IIR);
4027 	do {
4028 		bool irq_received = (iir & ~flip_mask) != 0;
4029 		bool blc_event = false;
4030 
4031 		/* Can't rely on pipestat interrupt bit in iir as it might
4032 		 * have been cleared after the pipestat interrupt was received.
4033 		 * It doesn't set the bit in iir again, but it still produces
4034 		 * interrupts (for non-MSI).
4035 		 */
4036 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4037 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4038 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4039 
4040 		for_each_pipe(dev_priv, pipe) {
4041 			int reg = PIPESTAT(pipe);
4042 			pipe_stats[pipe] = I915_READ(reg);
4043 
4044 			/* Clear the PIPE*STAT regs before the IIR */
4045 			if (pipe_stats[pipe] & 0x8000ffff) {
4046 				I915_WRITE(reg, pipe_stats[pipe]);
4047 				irq_received = true;
4048 			}
4049 		}
4050 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4051 
4052 		if (!irq_received)
4053 			break;
4054 
4055 		/* Consume port.  Then clear IIR or we'll miss events */
4056 		if (I915_HAS_HOTPLUG(dev) &&
4057 		    iir & I915_DISPLAY_PORT_INTERRUPT)
4058 			i9xx_hpd_irq_handler(dev);
4059 
4060 		I915_WRITE(IIR, iir & ~flip_mask);
4061 		new_iir = I915_READ(IIR); /* Flush posted writes */
4062 
4063 		if (iir & I915_USER_INTERRUPT)
4064 			notify_ring(&dev_priv->ring[RCS]);
4065 
4066 		for_each_pipe(dev_priv, pipe) {
4067 			int plane = pipe;
4068 			if (HAS_FBC(dev))
4069 				plane = !plane;
4070 
4071 			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4072 			    i915_handle_vblank(dev, plane, pipe, iir))
4073 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4074 
4075 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4076 				blc_event = true;
4077 
4078 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4079 				i9xx_pipe_crc_irq_handler(dev, pipe);
4080 
4081 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4082 				intel_cpu_fifo_underrun_irq_handler(dev_priv,
4083 								    pipe);
4084 		}
4085 
4086 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4087 			intel_opregion_asle_intr(dev);
4088 
4089 		/* With MSI, interrupts are only generated when iir
4090 		 * transitions from zero to nonzero.  If another bit got
4091 		 * set while we were handling the existing iir bits, then
4092 		 * we would never get another interrupt.
4093 		 *
4094 		 * This is fine on non-MSI as well, as if we hit this path
4095 		 * we avoid exiting the interrupt handler only to generate
4096 		 * another one.
4097 		 *
4098 		 * Note that for MSI this could cause a stray interrupt report
4099 		 * if an interrupt landed in the time between writing IIR and
4100 		 * the posting read.  This should be rare enough to never
4101 		 * trigger the 99% of 100,000 interrupts test for disabling
4102 		 * stray interrupts.
4103 		 */
4104 		iir = new_iir;
4105 	} while (iir & ~flip_mask);
4106 	return IRQ_HANDLED;
4107 }
4108 
4109 static void i915_irq_uninstall(struct drm_device * dev)
4110 {
4111 	struct drm_i915_private *dev_priv = dev->dev_private;
4112 	int pipe;
4113 
4114 	if (I915_HAS_HOTPLUG(dev)) {
4115 		i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4116 		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4117 	}
4118 
4119 	I915_WRITE16(HWSTAM, 0xffff);
4120 	for_each_pipe(dev_priv, pipe) {
4121 		/* Clear enable bits; then clear status bits */
4122 		I915_WRITE(PIPESTAT(pipe), 0);
4123 		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4124 	}
4125 	I915_WRITE(IMR, 0xffffffff);
4126 	I915_WRITE(IER, 0x0);
4127 
4128 	I915_WRITE(IIR, I915_READ(IIR));
4129 }
4130 
4131 static void i965_irq_preinstall(struct drm_device * dev)
4132 {
4133 	struct drm_i915_private *dev_priv = dev->dev_private;
4134 	int pipe;
4135 
4136 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4137 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4138 
4139 	I915_WRITE(HWSTAM, 0xeffe);
4140 	for_each_pipe(dev_priv, pipe)
4141 		I915_WRITE(PIPESTAT(pipe), 0);
4142 	I915_WRITE(IMR, 0xffffffff);
4143 	I915_WRITE(IER, 0x0);
4144 	POSTING_READ(IER);
4145 }
4146 
4147 static int i965_irq_postinstall(struct drm_device *dev)
4148 {
4149 	struct drm_i915_private *dev_priv = dev->dev_private;
4150 	u32 enable_mask;
4151 	u32 error_mask;
4152 
4153 	/* Unmask the interrupts that we always want on. */
4154 	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4155 			       I915_DISPLAY_PORT_INTERRUPT |
4156 			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4157 			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4158 			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4159 			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4160 			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4161 
4162 	enable_mask = ~dev_priv->irq_mask;
4163 	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4164 			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4165 	enable_mask |= I915_USER_INTERRUPT;
4166 
4167 	if (IS_G4X(dev))
4168 		enable_mask |= I915_BSD_USER_INTERRUPT;
4169 
4170 	/* Interrupt setup is already guaranteed to be single-threaded, this is
4171 	 * just to make the assert_spin_locked check happy. */
4172 	spin_lock_irq(&dev_priv->irq_lock);
4173 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4174 	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4175 	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4176 	spin_unlock_irq(&dev_priv->irq_lock);
4177 
4178 	/*
4179 	 * Enable some error detection, note the instruction error mask
4180 	 * bit is reserved, so we leave it masked.
4181 	 */
4182 	if (IS_G4X(dev)) {
4183 		error_mask = ~(GM45_ERROR_PAGE_TABLE |
4184 			       GM45_ERROR_MEM_PRIV |
4185 			       GM45_ERROR_CP_PRIV |
4186 			       I915_ERROR_MEMORY_REFRESH);
4187 	} else {
4188 		error_mask = ~(I915_ERROR_PAGE_TABLE |
4189 			       I915_ERROR_MEMORY_REFRESH);
4190 	}
4191 	I915_WRITE(EMR, error_mask);
4192 
4193 	I915_WRITE(IMR, dev_priv->irq_mask);
4194 	I915_WRITE(IER, enable_mask);
4195 	POSTING_READ(IER);
4196 
4197 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4198 	POSTING_READ(PORT_HOTPLUG_EN);
4199 
4200 	i915_enable_asle_pipestat(dev);
4201 
4202 	return 0;
4203 }
4204 
4205 static void i915_hpd_irq_setup(struct drm_device *dev)
4206 {
4207 	struct drm_i915_private *dev_priv = dev->dev_private;
4208 	u32 hotplug_en;
4209 
4210 	assert_spin_locked(&dev_priv->irq_lock);
4211 
4212 	/* Note HDMI and DP share hotplug bits */
4213 	/* enable bits are the same for all generations */
4214 	hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915);
4215 	/* Programming the CRT detection parameters tends
4216 	   to generate a spurious hotplug event about three
4217 	   seconds later.  So just do it once.
4218 	*/
4219 	if (IS_G4X(dev))
4220 		hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4221 	hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4222 
4223 	/* Ignore TV since it's buggy */
4224 	i915_hotplug_interrupt_update_locked(dev_priv,
4225 					     HOTPLUG_INT_EN_MASK |
4226 					     CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4227 					     CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4228 					     hotplug_en);
4229 }
4230 
4231 static irqreturn_t i965_irq_handler(void *arg)
4232 {
4233 	struct drm_device *dev = arg;
4234 	struct drm_i915_private *dev_priv = dev->dev_private;
4235 	u32 iir, new_iir;
4236 	u32 pipe_stats[I915_MAX_PIPES];
4237 	int pipe;
4238 	u32 flip_mask =
4239 		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4240 		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4241 
4242 	if (!intel_irqs_enabled(dev_priv))
4243 		return IRQ_NONE;
4244 
4245 	iir = I915_READ(IIR);
4246 
4247 	for (;;) {
4248 		bool irq_received = (iir & ~flip_mask) != 0;
4249 		bool blc_event = false;
4250 
4251 		/* Can't rely on pipestat interrupt bit in iir as it might
4252 		 * have been cleared after the pipestat interrupt was received.
4253 		 * It doesn't set the bit in iir again, but it still produces
4254 		 * interrupts (for non-MSI).
4255 		 */
4256 		lockmgr(&dev_priv->irq_lock, LK_EXCLUSIVE);
4257 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4258 			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4259 
4260 		for_each_pipe(dev_priv, pipe) {
4261 			int reg = PIPESTAT(pipe);
4262 			pipe_stats[pipe] = I915_READ(reg);
4263 
4264 			/*
4265 			 * Clear the PIPE*STAT regs before the IIR
4266 			 */
4267 			if (pipe_stats[pipe] & 0x8000ffff) {
4268 				I915_WRITE(reg, pipe_stats[pipe]);
4269 				irq_received = true;
4270 			}
4271 		}
4272 		lockmgr(&dev_priv->irq_lock, LK_RELEASE);
4273 
4274 		if (!irq_received)
4275 			break;
4276 
4277 		/* Consume port.  Then clear IIR or we'll miss events */
4278 		if (iir & I915_DISPLAY_PORT_INTERRUPT)
4279 			i9xx_hpd_irq_handler(dev);
4280 
4281 		I915_WRITE(IIR, iir & ~flip_mask);
4282 		new_iir = I915_READ(IIR); /* Flush posted writes */
4283 
4284 		if (iir & I915_USER_INTERRUPT)
4285 			notify_ring(&dev_priv->ring[RCS]);
4286 		if (iir & I915_BSD_USER_INTERRUPT)
4287 			notify_ring(&dev_priv->ring[VCS]);
4288 
4289 		for_each_pipe(dev_priv, pipe) {
4290 			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4291 			    i915_handle_vblank(dev, pipe, pipe, iir))
4292 				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4293 
4294 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4295 				blc_event = true;
4296 
4297 			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4298 				i9xx_pipe_crc_irq_handler(dev, pipe);
4299 
4300 			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4301 				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4302 		}
4303 
4304 		if (blc_event || (iir & I915_ASLE_INTERRUPT))
4305 			intel_opregion_asle_intr(dev);
4306 
4307 		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4308 			gmbus_irq_handler(dev);
4309 
4310 		/* With MSI, interrupts are only generated when iir
4311 		 * transitions from zero to nonzero.  If another bit got
4312 		 * set while we were handling the existing iir bits, then
4313 		 * we would never get another interrupt.
4314 		 *
4315 		 * This is fine on non-MSI as well, as if we hit this path
4316 		 * we avoid exiting the interrupt handler only to generate
4317 		 * another one.
4318 		 *
4319 		 * Note that for MSI this could cause a stray interrupt report
4320 		 * if an interrupt landed in the time between writing IIR and
4321 		 * the posting read.  This should be rare enough to never
4322 		 * trigger the 99% of 100,000 interrupts test for disabling
4323 		 * stray interrupts.
4324 		 */
4325 		iir = new_iir;
4326 	}
4327 	return IRQ_HANDLED;
4328 }
4329 
4330 static void i965_irq_uninstall(struct drm_device * dev)
4331 {
4332 	struct drm_i915_private *dev_priv = dev->dev_private;
4333 	int pipe;
4334 
4335 	if (!dev_priv)
4336 		return;
4337 
4338 	i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4339 	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4340 
4341 	I915_WRITE(HWSTAM, 0xffffffff);
4342 	for_each_pipe(dev_priv, pipe)
4343 		I915_WRITE(PIPESTAT(pipe), 0);
4344 	I915_WRITE(IMR, 0xffffffff);
4345 	I915_WRITE(IER, 0x0);
4346 
4347 	for_each_pipe(dev_priv, pipe)
4348 		I915_WRITE(PIPESTAT(pipe),
4349 			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4350 	I915_WRITE(IIR, I915_READ(IIR));
4351 }
4352 
4353 /**
4354  * intel_irq_init - initializes irq support
4355  * @dev_priv: i915 device instance
4356  *
4357  * This function initializes all the irq support including work items, timers
4358  * and all the vtables. It does not set up the interrupt itself, though.
4359  */
4360 void intel_irq_init(struct drm_i915_private *dev_priv)
4361 {
4362 	struct drm_device *dev = dev_priv->dev;
4363 
4364 	intel_hpd_init_work(dev_priv);
4365 
4366 	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4367 	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4368 
4369 	/* Let's track the enabled rps events */
4370 	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4371 		/* WaGsvRC0ResidencyMethod:vlv */
4372 		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4373 	else
4374 		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4375 
4376 	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4377 			  i915_hangcheck_elapsed);
4378 
4379 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4380 
4381 	if (IS_GEN2(dev_priv)) {
4382 		dev->max_vblank_count = 0;
4383 		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4384 	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4385 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4386 		dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4387 	} else {
4388 		dev->driver->get_vblank_counter = i915_get_vblank_counter;
4389 		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4390 	}
4391 
4392 	/*
4393 	 * Opt out of the vblank disable timer on everything except gen2.
4394 	 * Gen2 doesn't have a hardware frame counter and so depends on
4395 	 * vblank interrupts to produce sane vblank sequence numbers.
4396 	 */
4397 	if (!IS_GEN2(dev_priv))
4398 		dev->vblank_disable_immediate = true;
4399 
4400 	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4401 	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4402 
4403 	if (IS_CHERRYVIEW(dev_priv)) {
4404 		dev->driver->irq_handler = cherryview_irq_handler;
4405 		dev->driver->irq_preinstall = cherryview_irq_preinstall;
4406 		dev->driver->irq_postinstall = cherryview_irq_postinstall;
4407 		dev->driver->irq_uninstall = cherryview_irq_uninstall;
4408 		dev->driver->enable_vblank = valleyview_enable_vblank;
4409 		dev->driver->disable_vblank = valleyview_disable_vblank;
4410 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4411 	} else if (IS_VALLEYVIEW(dev_priv)) {
4412 		dev->driver->irq_handler = valleyview_irq_handler;
4413 		dev->driver->irq_preinstall = valleyview_irq_preinstall;
4414 		dev->driver->irq_postinstall = valleyview_irq_postinstall;
4415 		dev->driver->irq_uninstall = valleyview_irq_uninstall;
4416 		dev->driver->enable_vblank = valleyview_enable_vblank;
4417 		dev->driver->disable_vblank = valleyview_disable_vblank;
4418 		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4419 	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
4420 		dev->driver->irq_handler = gen8_irq_handler;
4421 		dev->driver->irq_preinstall = gen8_irq_reset;
4422 		dev->driver->irq_postinstall = gen8_irq_postinstall;
4423 		dev->driver->irq_uninstall = gen8_irq_uninstall;
4424 		dev->driver->enable_vblank = gen8_enable_vblank;
4425 		dev->driver->disable_vblank = gen8_disable_vblank;
4426 		if (IS_BROXTON(dev))
4427 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4428 		else if (HAS_PCH_SPT(dev))
4429 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4430 		else
4431 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4432 	} else if (HAS_PCH_SPLIT(dev)) {
4433 		dev->driver->irq_handler = ironlake_irq_handler;
4434 		dev->driver->irq_preinstall = ironlake_irq_reset;
4435 		dev->driver->irq_postinstall = ironlake_irq_postinstall;
4436 		dev->driver->irq_uninstall = ironlake_irq_uninstall;
4437 		dev->driver->enable_vblank = ironlake_enable_vblank;
4438 		dev->driver->disable_vblank = ironlake_disable_vblank;
4439 		dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4440 	} else {
4441 		if (INTEL_INFO(dev_priv)->gen == 2) {
4442 			dev->driver->irq_preinstall = i8xx_irq_preinstall;
4443 			dev->driver->irq_postinstall = i8xx_irq_postinstall;
4444 			dev->driver->irq_handler = i8xx_irq_handler;
4445 			dev->driver->irq_uninstall = i8xx_irq_uninstall;
4446 		} else if (INTEL_INFO(dev_priv)->gen == 3) {
4447 			dev->driver->irq_preinstall = i915_irq_preinstall;
4448 			dev->driver->irq_postinstall = i915_irq_postinstall;
4449 			dev->driver->irq_uninstall = i915_irq_uninstall;
4450 			dev->driver->irq_handler = i915_irq_handler;
4451 		} else {
4452 			dev->driver->irq_preinstall = i965_irq_preinstall;
4453 			dev->driver->irq_postinstall = i965_irq_postinstall;
4454 			dev->driver->irq_uninstall = i965_irq_uninstall;
4455 			dev->driver->irq_handler = i965_irq_handler;
4456 		}
4457 		if (I915_HAS_HOTPLUG(dev_priv))
4458 			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4459 		dev->driver->enable_vblank = i915_enable_vblank;
4460 		dev->driver->disable_vblank = i915_disable_vblank;
4461 	}
4462 }
4463 
4464 /**
4465  * intel_irq_install - enables the hardware interrupt
4466  * @dev_priv: i915 device instance
4467  *
4468  * This function enables the hardware interrupt handling, but leaves hotplug
4469  * handling disabled. It is called after intel_irq_init().
4470  *
4471  * In the driver load and resume code we need working interrupts in a few places
4472  * but don't want to deal with the hassle of concurrent probe and hotplug
4473  * workers. Hence the split into two stages.
4474  */
4475 int intel_irq_install(struct drm_i915_private *dev_priv)
4476 {
4477 	/*
4478 	 * We enable some interrupt sources in our postinstall hooks, so mark
4479 	 * interrupts as enabled _before_ actually enabling them to avoid
4480 	 * special cases in our ordering checks.
4481 	 */
4482 	dev_priv->pm.irqs_enabled = true;
4483 
4484 	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4485 }
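
/*
 * A rough sketch of the intended call order during driver load (the exact
 * call sites live in the load/resume code and may differ):
 *
 *	intel_irq_init(dev_priv);		vtables, work items, timers
 *	...
 *	ret = intel_irq_install(dev_priv);	request and enable the interrupt
 *	...
 *	intel_hpd_init(dev_priv);		hotplug enabled separately later
 */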
4486 
4487 /**
4488  * intel_irq_uninstall - finalizes all irq handling
4489  * @dev_priv: i915 device instance
4490  *
4491  * This stops interrupt and hotplug handling and unregisters and frees all
4492  * resources acquired in the init functions.
4493  */
4494 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4495 {
4496 	drm_irq_uninstall(dev_priv->dev);
4497 	intel_hpd_cancel_work(dev_priv);
4498 	dev_priv->pm.irqs_enabled = false;
4499 }
4500 
4501 /**
4502  * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4503  * @dev_priv: i915 device instance
4504  *
4505  * This function is used to disable interrupts at runtime, both in the runtime
4506  * pm and the system suspend/resume code.
4507  */
4508 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4509 {
4510 	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4511 	dev_priv->pm.irqs_enabled = false;
4512 #if 0
4513 	synchronize_irq(dev_priv->dev->irq);
4514 #endif
4515 }
4516 
4517 /**
4518  * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4519  * @dev_priv: i915 device instance
4520  *
4521  * This function is used to enable interrupts at runtime, both in the runtime
4522  * pm and the system suspend/resume code.
4523  */
4524 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4525 {
4526 	dev_priv->pm.irqs_enabled = true;
4527 	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4528 	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4529 }
4530